org.apache.hadoop.hbase.HColumnDescriptor Java Examples

The following examples show how to use org.apache.hadoop.hbase.HColumnDescriptor. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
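Before diving into the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: build an HColumnDescriptor for each column family, tune it (max versions, TTL, and so on), attach it to an HTableDescriptor, and create the table through an Admin instance. The table name, family name, and settings below are placeholders rather than values taken from any of the projects listed here; the classes come from org.apache.hadoop.hbase and org.apache.hadoop.hbase.client.

// Minimal sketch of typical HColumnDescriptor usage (HBase 1.x-style client API); names and values are illustrative.
Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("example_table"));
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setMaxVersions(3);                // keep at most 3 versions per cell
    family.setTimeToLive(7 * 24 * 60 * 60);  // TTL is specified in seconds
    tableDesc.addFamily(family);
    if (!admin.tableExists(tableDesc.getTableName())) {
        admin.createTable(tableDesc);
    }
}

Note that HColumnDescriptor and HTableDescriptor are deprecated in HBase 2.x in favor of ColumnFamilyDescriptorBuilder and TableDescriptorBuilder; the snippets below all use the older 1.x-style API.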
Example #1
Source File: AbstractHBaseTableTest.java    From phoenix-tephra with Apache License 2.0
protected static Table createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    desc.addFamily(columnDesc);
  }
  if (existingData) {
    desc.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Assign coprocessor priorities starting just above PRIORITY_USER
  int priority = Coprocessor.PRIORITY_USER;
  // order in list is the same order that coprocessors will be invoked
  for (String coprocessor : coprocessors) {
    desc.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return testUtil.getConnection().getTable(TableName.valueOf(tableName));
}
 
Example #2
Source File: RemoteDictionaryStore.java    From kylin-on-parquet-v2 with Apache License 2.0
public void init(String[] cfs) throws IOException {
    logger.debug("Checking streaming remote store for {} at {}.", tableName, String.join(", ", cfs));
    Connection conn = getConnection();
    Admin admin = conn.getAdmin();
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(hbaseTableName));
    for (String family : cfs) {
        HColumnDescriptor fd = new HColumnDescriptor(family);
        desc.addFamily(fd);
    }
    DistributedLock lock = KylinConfig.getInstanceFromEnv().getDistributedLockFactory().lockForCurrentProcess();
    try {
        boolean locked = lock.lock(lockPath());
        if (locked && !admin.tableExists(TableName.valueOf(hbaseTableName))) {
            logger.info("Create htable with {}.", desc);
            admin.createTable(desc);
        } else {
            logger.info("Table exists or cannot fetch lock {}", desc);
        }
    } finally {
        admin.close();
        if (lock != null && lock.isLockedByMe(lockPath())) {
            lock.unlock(lockPath());
        }
    }
    table = conn.getTable(TableName.valueOf(hbaseTableName));
}
 
Example #3
Source File: HBaseUtils.java    From aws-big-data-blog with Apache License 2.0
/**
 * Helper method to create an HBase table in an Amazon EMR cluster with HBase installed
 * 
 * @param tableName - name for table to create
 * @param dnsId - Amazon EMR master node public DNS
 * @param hbaseRestPort - HBase Rest port
 */
public static void createTable(String tableName, String dnsId, int hbaseRestPort) {
	Configuration config = HBaseConfiguration.create();
	RemoteAdmin admin = new RemoteAdmin(new Client(new Cluster().add(dnsId, hbaseRestPort)), config);
	String [] families = {"user", "address", "contact", "likes"};
	try {
		if (admin.isTableAvailable(tableName)) {
			LOG.info("table already exists!");
			return;
		} else {
			HTableDescriptor tableDesc = new HTableDescriptor(tableName);
			for (int i = 0; i < families.length; i++) {
				tableDesc.addFamily(new HColumnDescriptor(families[i]));
			}
			admin.createTable(tableDesc);
			isTableAvailable = true;
			LOG.info("create table " + tableName + " ok.");
		} 

	} catch (IOException e) {
		LOG.error(e, e.getCause()); 
	}

}
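A call to this helper might look like the following; the DNS name is a placeholder for an actual EMR master node, and 8080 is the default HBase REST port.

HBaseUtils.createTable("users", "ec2-12-34-56-78.compute-1.amazonaws.com", 8080);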
 
Example #4
Source File: AlterTableIT.java    From phoenix with Apache License 2.0
@Test
public void testAddColumnForNewColumnFamily() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE testAddColumnForNewColumnFamily (\n"
            +"ID1 VARCHAR(15) NOT NULL,\n"
            +"ID2 VARCHAR(15) NOT NULL,\n"
            +"CREATED_DATE DATE,\n"
            +"CREATION_TIME BIGINT,\n"
            +"LAST_USED DATE,\n"
            +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) SALT_BUCKETS = 8";
    Connection conn1 = DriverManager.getConnection(getUrl(), props);
    conn1.createStatement().execute(ddl);
    ddl = "ALTER TABLE testAddColumnForNewColumnFamily ADD CF.STRING VARCHAR";
    conn1.createStatement().execute(ddl);
    try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes("testAddColumnForNewColumnFamily".toUpperCase())).getColumnFamilies();
        assertEquals(2, columnFamilies.length);
        assertEquals("0", columnFamilies[0].getNameAsString());
        assertEquals(HColumnDescriptor.DEFAULT_TTL, columnFamilies[0].getTimeToLive());
        assertEquals("CF", columnFamilies[1].getNameAsString());
        assertEquals(HColumnDescriptor.DEFAULT_TTL, columnFamilies[1].getTimeToLive());
    }
}
 
Example #5
Source File: TestRegionReplicaFailover.java    From hbase with Apache License 2.0
@Before
public void before() throws Exception {
  Configuration conf = HTU.getConfiguration();
 // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, true);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);

  HTU.startMiniCluster(NB_SERVERS);
  htd = HTU.createTableDescriptor(
    TableName.valueOf(name.getMethodName().substring(0, name.getMethodName().length()-3)),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  htd.setRegionReplication(3);
  HTU.getAdmin().createTable(htd);
}
 
Example #6
Source File: HFileOutputFormat3.java    From kylin-on-parquet-v2 with Apache License 2.0
/**
 * Serialize column family to bloom type map to configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws IOException
 *           on failure to read column family descriptors
 */
@VisibleForTesting
static void configureBloomType(HTableDescriptor tableDescriptor, Configuration conf)
        throws UnsupportedEncodingException {
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    StringBuilder bloomTypeConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            bloomTypeConfigValue.append('&');
        }
        bloomTypeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        bloomTypeConfigValue.append('=');
        String bloomType = familyDescriptor.getBloomFilterType().toString();
        if (bloomType == null) {
            bloomType = HColumnDescriptor.DEFAULT_BLOOMFILTER;
        }
        bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
    }
    conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
}
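The value stored under BLOOM_TYPE_FAMILIES_CONF_KEY is an ampersand-separated list of URL-encoded family=bloomType pairs. For a hypothetical table with families cf1 (ROW bloom filter) and cf2 (ROWCOL), the serialized value would look like:

cf1=ROW&cf2=ROWCOL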
 
Example #7
Source File: HFileOutputFormat3.java    From kylin-on-parquet-v2 with Apache License 2.0
/**
 * Serialize column family to block size map to configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws IOException
 *           on failure to read column family descriptors
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
        throws UnsupportedEncodingException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            blockSizeConfigValue.append('&');
        }
        blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        blockSizeConfigValue.append('=');
        blockSizeConfigValue.append(URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    // Get rid of the last ampersand
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
 
Example #8
Source File: HBaseTestingClusterAutoStarter.java    From flink with Apache License 2.0
protected static void createTable(TableName tableName, byte[][] columnFamilyName, byte[][] splitKeys) {
	LOG.info("HBase minicluster: Creating table " + tableName.getNameAsString());

	assertNotNull("HBaseAdmin is not initialized successfully.", admin);
	HTableDescriptor desc = new HTableDescriptor(tableName);
	for (byte[] fam : columnFamilyName) {
		HColumnDescriptor colDef = new HColumnDescriptor(fam);
		desc.addFamily(colDef);
	}

	try {
		admin.createTable(desc, splitKeys);
		createdTables.add(tableName);
		assertTrue("Fail to create the table", admin.tableExists(tableName));
	} catch (IOException e) {
		assertNull("Exception found while creating table", e);
	}
}
 
Example #9
Source File: HBaseTransactionPruningPlugin.java    From phoenix-tephra with Apache License 2.0
/**
 * Create the prune state table given the {@link TableName} if the table doesn't exist already.
 *
 * @param stateTable prune state table name
 */
protected void createPruneTable(TableName stateTable) throws IOException {
  try (Admin admin = this.connection.getAdmin()) {
    if (admin.tableExists(stateTable)) {
      LOG.debug("Not creating pruneStateTable {} since it already exists.",
                stateTable.getNameWithNamespaceInclAsString());
      return;
    }

    HTableDescriptor htd = new HTableDescriptor(stateTable);
    htd.addFamily(new HColumnDescriptor(DataJanitorState.FAMILY).setMaxVersions(1));
    admin.createTable(htd);
    LOG.info("Created pruneTable {}", stateTable.getNameWithNamespaceInclAsString());
  } catch (TableExistsException ex) {
    // Expected if the prune state table is being created at the same time by another client
    LOG.debug("Not creating pruneStateTable {} since it already exists.",
              stateTable.getNameWithNamespaceInclAsString(), ex);
  }
}
 
Example #10
Source File: HBaseIOIT.java    From beam with Apache License 2.0
@BeforeClass
public static void setup() throws IOException {
  PipelineOptionsFactory.register(HBasePipelineOptions.class);
  options = TestPipeline.testingPipelineOptions().as(HBasePipelineOptions.class);

  numberOfRows = options.getNumberOfRecords();

  conf.setStrings("hbase.zookeeper.quorum", options.getHbaseServerName());
  conf.setStrings("hbase.cluster.distributed", "true");
  conf.setStrings("hbase.client.retries.number", "1");

  Connection connection = ConnectionFactory.createConnection(conf);

  admin = connection.getAdmin();
  HTableDescriptor testTable =
      new HTableDescriptor(TableName.valueOf(TABLE_NAME))
          .addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(testTable);
}
 
Example #11
Source File: Main.java    From flink-learning with Apache License 2.0
private static void writeEventToHbase(String string, ParameterTool parameterTool) throws IOException {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
    configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
    configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
    configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
    configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));

    Connection connect = ConnectionFactory.createConnection(configuration);
    Admin admin = connect.getAdmin();
    if (!admin.tableExists(HBASE_TABLE_NAME)) { // check whether the table exists; create it if it does not
        admin.createTable(new HTableDescriptor(HBASE_TABLE_NAME).addFamily(new HColumnDescriptor(INFO_STREAM)));
    }
    Table table = connect.getTable(HBASE_TABLE_NAME);
    TimeStamp ts = new TimeStamp(new Date());
    Date date = ts.getDate();
    Put put = new Put(Bytes.toBytes(date.getTime()));
    put.addColumn(Bytes.toBytes(INFO_STREAM), Bytes.toBytes("test"), Bytes.toBytes(string));
    table.put(put);
    table.close();
    connect.close();
}
 
Example #12
Source File: HBaseUtil.java    From BigData-In-Practice with Apache License 2.0
/**
 * Create an HBase table.
 *
 * @param tableName table name
 * @param cfs       array of column family names
 * @return whether the table was created successfully
 */
public static boolean createTable(String tableName, String[] cfs) {
    try (HBaseAdmin admin = (HBaseAdmin) HBaseConn.getHBaseConn().getAdmin()) {
        if (admin.tableExists(tableName)) {
            return false;
        }
        HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
        Arrays.stream(cfs).forEach(cf -> {
            HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf);
            columnDescriptor.setMaxVersions(1);
            tableDescriptor.addFamily(columnDescriptor);
        });
        admin.createTable(tableDescriptor);
    } catch (Exception e) {
        e.printStackTrace();
        return false;
    }
    return true;
}
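A usage sketch with hypothetical table and column family names; the call creates the table only if it does not already exist and returns false when it does:

boolean created = HBaseUtil.createTable("user_behavior", new String[]{"info", "stat"});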
 
Example #13
Source File: BigtableHelloWorld.java    From cloud-bigtable-examples with Apache License 2.0
public static String create(Connection connection) {
  try {
      // The admin API lets us create, manage and delete tables
    Admin admin = connection.getAdmin();
    // [END connecting_to_bigtable]

    // [START creating_a_table]
    // Create a table with a single column family
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
    descriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY_NAME));

    admin.createTable(descriptor);
    // [END creating_a_table]
  } catch (IOException e) {
    return "Table exists.";
  }
  return "Create table " + Bytes.toString(TABLE_NAME);
}
 
Example #14
Source File: HBaseSimpleDemo.java    From bigdata-tutorial with Apache License 2.0
public Boolean createTable(String tableName, String familyName) throws Exception {
	HBaseAdmin admin = new HBaseAdmin(hconn);
	if (admin.tableExists(tableName)) {
		LOGGER.warn(">>>> Table {} exists!", tableName);
		admin.close();
		return false;
	}
	HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
	tableDesc.addFamily(new HColumnDescriptor(familyName));
	admin.createTable(tableDesc);
	LOGGER.info(">>>> Table {} create success!", tableName);

	admin.close();
	return true;

}
 
Example #15
Source File: IndexerDryRunTest.java    From hbase-indexer with Apache License 2.0
@Before
public void setUp() throws Exception {
    HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TEST_TABLE_NAME));
    tableDescriptor.addFamily(new HColumnDescriptor(TEST_COLFAM_NAME));
    HBASE_ADMIN.createTable(tableDescriptor);
    
    recordTable = HBASE_ADMIN.getConnection().getTable(TableName.valueOf(TEST_TABLE_NAME));
    
    opts = new HBaseIndexingOptions(new Configuration());
    opts.zkHost = SOLR_TEST_UTILITY.getZkConnectString();
    opts.hbaseTableName = Bytes.toString(TEST_TABLE_NAME);
    opts.hbaseIndexerConfigFile = new File(Resources.getResource(getClass(), "user_indexer.xml").toURI());
    opts.collection = "collection1";
    opts.shards = 1;
    opts.reducers = 1;
    opts.fanout = Integer.MAX_VALUE;
    opts.isDryRun = true;
   
    opts.updateConflictResolver = RetainMostRecentUpdateConflictResolver.class.getName();
    opts.isVerbose = true;

    opts.hBaseAdmin = HBASE_ADMIN;
}
 
Example #16
Source File: Helper.java    From antsdb with GNU Lesser General Public License v3.0
public static void createTable(Connection conn, String namespace, String tableName, Algorithm compressionType) {
    // Check whether table already exists
    if (Helper.existsTable(conn, namespace, tableName)) {
        Helper.dropTable(conn, namespace, tableName);
    }
    if (!Helper.existsTable(conn, namespace, tableName)) {
        
        // Create table
        try (Admin admin = conn.getAdmin()) {
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf(namespace, tableName));
            table.addFamily(new HColumnDescriptor(DATA_COLUMN_FAMILY).setCompressionType(compressionType));
            _log.debug("creating table {}", table.toString());
            admin.createTable(table);
        }
        catch (Exception ex) {
            throw new OrcaHBaseException(ex, "Failed to create table - " + tableName);
        }
    }
}
 
Example #17
Source File: HBaseTest.java    From codes-scratch-crawler with Apache License 2.0
public static void createTable(String tableName) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  if (!admin.tableExists(tableName)) {
    HTableDescriptor tableDesc = new HTableDescriptor(tableName);
    tableDesc.addFamily(new HColumnDescriptor("ip:"));
    tableDesc.addFamily(new HColumnDescriptor("time:"));
    tableDesc.addFamily(new HColumnDescriptor("type:"));
    tableDesc.addFamily(new HColumnDescriptor("cookie:"));
    // note the "c" column family; the following uses this family to illustrate column-oriented storage
    tableDesc.addFamily(new HColumnDescriptor("c:"));
    admin.createTable(tableDesc);
    System.out.println("table create ok!");
  } else {
    System.out.println("table already exists!");
  }
}
 
Example #18
Source File: AlterTableIT.java    From phoenix with Apache License 2.0
@Test
public void testSetPropertyAndAddColumnUsingDefaultColumnFamilySpecifier() throws Exception {
	Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
	Connection conn = DriverManager.getConnection(getUrl(), props);
	String ddl = "CREATE TABLE SETPROPDEFCF " +
			"  (a_string varchar not null, col1 integer, CF1.col2 integer" +
			"  CONSTRAINT pk PRIMARY KEY (a_string)) DEFAULT_COLUMN_FAMILY = 'XYZ'\n";
	try {
        conn.createStatement().execute(ddl);
        conn.createStatement().execute("ALTER TABLE SETPROPDEFCF ADD col4 integer XYZ.REPLICATION_SCOPE=1 ");
        conn.createStatement().execute("ALTER TABLE SETPROPDEFCF ADD XYZ.col5 integer IN_MEMORY=true ");
        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes("SETPROPDEFCF")).getColumnFamilies();
            assertEquals(2, columnFamilies.length);
            assertEquals("CF1", columnFamilies[0].getNameAsString());
            assertFalse(columnFamilies[0].isInMemory());
            assertEquals(0, columnFamilies[0].getScope());
            assertEquals("XYZ", columnFamilies[1].getNameAsString());
            assertTrue(columnFamilies[1].isInMemory());
            assertEquals(1, columnFamilies[1].getScope());
        }
    } finally {
        conn.close();
    }
}
 
Example #19
Source File: TestSnapshotFilter.java    From phoenix-omid with Apache License 2.0
private void createTableIfNotExists(String tableName, byte[]... families) throws IOException {
    if (!admin.tableExists(TableName.valueOf(tableName))) {
        LOG.info("Creating {} table...", tableName);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        for (byte[] family : families) {
            HColumnDescriptor datafam = new HColumnDescriptor(family);
            datafam.setMaxVersions(MAX_VERSIONS);
            desc.addFamily(datafam);
        }

        int priority = Coprocessor.PRIORITY_HIGHEST;

        desc.addCoprocessor(OmidSnapshotFilter.class.getName(),null,++priority,null);
        desc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation",null,++priority,null);

        admin.createTable(desc);
        try {
            hbaseTestUtil.waitTableAvailable(TableName.valueOf(tableName),5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

}
 
Example #20
Source File: HBaseTransactionPruningPlugin.java    From phoenix-tephra with Apache License 2.0
/**
 * Create the prune state table given the {@link TableName} if the table doesn't exist already.
 *
 * @param stateTable prune state table name
 */
protected void createPruneTable(TableName stateTable) throws IOException {
  try (Admin admin = this.connection.getAdmin()) {
    if (admin.tableExists(stateTable)) {
      LOG.debug("Not creating pruneStateTable {}:{} since it already exists.",
                stateTable.getNamespaceAsString(), stateTable.getNameAsString());
      return;
    }

    HTableDescriptor htd = new HTableDescriptor(stateTable);
    htd.addFamily(new HColumnDescriptor(DataJanitorState.FAMILY).setMaxVersions(1));
    admin.createTable(htd);
    LOG.info("Created pruneTable {}:{}", stateTable.getNamespaceAsString(), stateTable.getNameAsString());
  } catch (TableExistsException ex) {
    // Expected if the prune state table is being created at the same time by another client
    LOG.debug("Not creating pruneStateTable {}:{} since it already exists.",
              stateTable.getNamespaceAsString(), stateTable.getNameAsString(), ex);
  }
}
 
Example #21
Source File: TestSnapshotFilterLL.java    From phoenix-omid with Apache License 2.0
private void createTableIfNotExists(String tableName, byte[]... families) throws IOException {
    if (!admin.tableExists(TableName.valueOf(tableName))) {
        LOG.info("Creating {} table...", tableName);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        for (byte[] family : families) {
            HColumnDescriptor datafam = new HColumnDescriptor(family);
            datafam.setMaxVersions(MAX_VERSIONS);
            desc.addFamily(datafam);
        }

        int priority = Coprocessor.PRIORITY_HIGHEST;

        desc.addCoprocessor(OmidSnapshotFilter.class.getName(),null,++priority,null);
        desc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation",null,++priority,null);

        admin.createTable(desc);
        try {
            hbaseTestUtil.waitTableAvailable(TableName.valueOf(tableName),5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

}
 
Example #22
Source File: OnlineOfflineStateModelFactory.java    From terrapin with Apache License 2.0
@Transition(from = "OFFLINE", to = "ONLINE")
public void onBecomeOnlineFromOffline(Message message,
                                      NotificationContext context) {
  Pair<String, String> hdfsPathAndPartition = getHdfsPathAndPartitionNum(message);
  String hdfsPath = hdfsPathAndPartition.getLeft();
  LOG.info("Opening " + hdfsPath);
  try {
    // TODO(varun): Maybe retry here.
    HColumnDescriptor family = new HColumnDescriptor(Constants.HFILE_COLUMN_FAMILY);
    family.setBlockCacheEnabled(isBlockCacheEnabled);
    Reader r = readerFactory.createHFileReader(hdfsPath, new CacheConfig(conf, family));
    resourcePartitionMap.addReader(
        message.getResourceName(), hdfsPathAndPartition.getRight(), r);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example #23
Source File: HelloWorldTest.java    From java-docs-samples with Apache License 2.0
@BeforeClass
public static void beforeClass() {
  projectId = requireEnv("GOOGLE_CLOUD_PROJECT");
  instanceId = requireEnv(INSTANCE_ENV);
  try (Connection connection = BigtableConfiguration.connect(projectId, instanceId)) {
    Admin admin = connection.getAdmin();
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(TABLE_ID));
    descriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY_NAME));
    admin.createTable(descriptor);

    Table table = connection.getTable(TableName.valueOf(Bytes.toBytes(TABLE_ID)));

    String rowKey = "phone#4c410523#20190401";
    Put put = new Put(Bytes.toBytes(rowKey));

    put.addColumn(
        Bytes.toBytes(COLUMN_FAMILY_NAME), Bytes.toBytes("os_name"), Bytes.toBytes("android"));
    table.put(put);

  } catch (Exception e) {
    System.out.println("Error during beforeClass: \n" + e.toString());
  }
}
 
Example #24
Source File: BigtableHelloWorld.java    From java-docs-samples with Apache License 2.0
/**
 * Create a table -- first time only.
 * @param connection to Bigtable
 * @return the status
 */
public static String create(Connection connection) {
  try {
    // The admin API lets us create, manage and delete tables
    Admin admin = connection.getAdmin();

    // Create a table with a single column family
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
    descriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY_NAME));

    admin.createTable(descriptor);
  } catch (IOException e) {
    return "Table exists.";
  }
  return "Create table " + Bytes.toString(TABLE_NAME);
}
 
Example #25
Source File: HFileOutputFormat3.java    From kylin with Apache License 2.0
/**
 * Serialize column family to block size map to configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws IOException
 *           on failure to read column family descriptors
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
        throws UnsupportedEncodingException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            blockSizeConfigValue.append('&');
        }
        blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        blockSizeConfigValue.append('=');
        blockSizeConfigValue.append(URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    // Get rid of the last ampersand
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
 
Example #26
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion updateTtl(HRegion region, byte[] family, long ttl) throws Exception {
  region.close();
  HTableDescriptor htd = region.getTableDesc();
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  return HRegion.openHRegion(region.getRegionInfo(), htd, region.getLog(), conf,
                                        new MockRegionServerServices(conf, null), null);
}
 
Example #27
Source File: Configure.java    From learning-hadoop with Apache License 2.0
public static HTableDescriptor genHTableDescriptor(String tableName, short replica, boolean lobenable) {
  HTableDescriptor ht = new HTableDescriptor(tableName);
  HColumnDescriptor desc = new HColumnDescriptor(FAMILY_NAME);
  if (replica != Short.MIN_VALUE) {
    desc.setReplication(replica);
    System.out.println("genHTableDescriptor(String,short):replica---"
        + replica);
  }
  desc.setBlbStoreEnabled(true);
  ht.addFamily(desc);
  return ht;
}
 
Example #28
Source File: CreateNewHbase.java    From learning-hadoop with Apache License 2.0
public static void createTable(String tabName) throws Exception {
	HBaseAdmin admin = new HBaseAdmin(conf);
	if (admin.tableExists(tabName)) {
		System.out.println(tabName + " exists!");
		admin.close();
		return;
	}
	HTableDescriptor table = new HTableDescriptor(tabName);
	table.addFamily(new HColumnDescriptor("f1"));
	table.addFamily(new HColumnDescriptor("f2"));
	table.addFamily(new HColumnDescriptor("f3"));
	table.getFamily(Bytes.toBytes("f1"));
	admin.createTable(table);
	admin.close();
}