Java Code Examples for org.apache.hadoop.hbase.HColumnDescriptor

The following examples show how to use org.apache.hadoop.hbase.HColumnDescriptor. These examples are extracted from open source projects.
Example 1
Example 1 — Project: kylin — File: HFileOutputFormat3.java — License: Apache License 2.0
/**
 * Serialize the column-family -&gt; block-size map into the job configuration.
 * Invoked while configuring the MR job for incremental load.
 * The value has the form "family1=size1&amp;family2=size2&amp;..." with each key and
 * value URL-encoded, so family names may contain arbitrary characters.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws UnsupportedEncodingException
 *           never in practice, since UTF-8 is always supported
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
        throws UnsupportedEncodingException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            // separator is prepended before each entry after the first,
            // so the value never ends with a trailing '&'
            blockSizeConfigValue.append('&');
        }
        blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        blockSizeConfigValue.append('=');
        blockSizeConfigValue.append(URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    // NOTE(review): the previous "Get rid of the last ampersand" comment was
    // stale — no trailing ampersand exists at this point.
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
 
Example 2
Example 2 — Project: flink-learning — File: Main.java — License: Apache License 2.0
/**
 * Writes one event string into HBase, keyed by the current time in millis.
 * Creates the target table (with the INFO_STREAM family) if it is absent.
 *
 * @param string        payload stored under column "test"
 * @param parameterTool source of the HBase client settings
 * @throws IOException on any HBase connection or write failure
 */
private static void writeEventToHbase(String string, ParameterTool parameterTool) throws IOException {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
    configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
    configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
    configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
    configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));

    // BUGFIX: the original leaked the connection, admin, and table whenever an
    // HBase call threw before the explicit close() lines at the bottom.
    try (Connection connect = ConnectionFactory.createConnection(configuration);
         Admin admin = connect.getAdmin()) {
        // Create the table if it does not exist yet.
        if (!admin.tableExists(HBASE_TABLE_NAME)) {
            admin.createTable(new HTableDescriptor(HBASE_TABLE_NAME).addFamily(new HColumnDescriptor(INFO_STREAM)));
        }
        try (Table table = connect.getTable(HBASE_TABLE_NAME)) {
            TimeStamp ts = new TimeStamp(new Date());
            Date date = ts.getDate();
            Put put = new Put(Bytes.toBytes(date.getTime()));
            put.addColumn(Bytes.toBytes(INFO_STREAM), Bytes.toBytes("test"), Bytes.toBytes(string));
            table.put(put);
        }
    }
}
 
Example 3
Example 3 — Project: flink — File: HBaseTestingClusterAutoStarter.java — License: Apache License 2.0
/**
 * Creates a table on the minicluster with the given families and split keys,
 * recording it in {@code createdTables} for later cleanup.
 */
protected static void createTable(TableName tableName, byte[][] columnFamilyName, byte[][] splitKeys) {
	LOG.info("HBase minicluster: Creating table " + tableName.getNameAsString());

	// The cluster starter must have initialized the admin handle already.
	assertNotNull("HBaseAdmin is not initialized successfully.", admin);

	HTableDescriptor descriptor = new HTableDescriptor(tableName);
	for (byte[] family : columnFamilyName) {
		descriptor.addFamily(new HColumnDescriptor(family));
	}

	try {
		admin.createTable(descriptor, splitKeys);
		createdTables.add(tableName);
		assertTrue("Fail to create the table", admin.tableExists(tableName));
	} catch (IOException e) {
		// Convert the exception into a test failure with a readable message.
		assertNull("Exception found while creating table", e);
	}
}
 
Example 4
Project: beam   File: HBaseIOIT.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setup() throws IOException {
  // Register the custom options class so the factory can parse its flags,
  // then materialize the options for this integration test.
  PipelineOptionsFactory.register(HBasePipelineOptions.class);
  options = TestPipeline.testingPipelineOptions().as(HBasePipelineOptions.class);

  numberOfRows = options.getNumberOfRecords();

  // Point the HBase client at the test server; a single retry makes
  // connection problems fail fast instead of stalling the suite.
  conf.setStrings("hbase.zookeeper.quorum", options.getHbaseServerName());
  conf.setStrings("hbase.cluster.distributed", "true");
  conf.setStrings("hbase.client.retries.number", "1");

  // NOTE(review): this Connection is never closed here; presumably it lives
  // for the whole test class — confirm a matching teardown exists.
  Connection connection = ConnectionFactory.createConnection(conf);

  admin = connection.getAdmin();
  // Create the table under test with a single column family.
  HTableDescriptor testTable =
      new HTableDescriptor(TableName.valueOf(TABLE_NAME))
          .addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(testTable);
}
 
Example 5
Project: kylin-on-parquet-v2   File: HFileOutputFormat3.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Serialize the column-family -&gt; block-size map into the job configuration.
 * Invoked while configuring the MR job for incremental load.
 * The value has the form "family1=size1&amp;family2=size2&amp;..." with each key and
 * value URL-encoded, so family names may contain arbitrary characters.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws UnsupportedEncodingException
 *           never in practice, since UTF-8 is always supported
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
        throws UnsupportedEncodingException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            // separator is prepended before each entry after the first,
            // so the value never ends with a trailing '&'
            blockSizeConfigValue.append('&');
        }
        blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        blockSizeConfigValue.append('=');
        blockSizeConfigValue.append(URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    // NOTE(review): the previous "Get rid of the last ampersand" comment was
    // stale — no trailing ampersand exists at this point.
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
 
Example 6
Project: kylin-on-parquet-v2   File: HFileOutputFormat3.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Serialize the column-family -&gt; bloom-type map into the job configuration.
 * Invoked while configuring the MR job for incremental load.
 * The value has the form "family1=type1&amp;family2=type2&amp;..." with each key and
 * value URL-encoded.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws UnsupportedEncodingException
 *           never in practice, since UTF-8 is always supported
 */
@VisibleForTesting
static void configureBloomType(HTableDescriptor tableDescriptor, Configuration conf)
        throws UnsupportedEncodingException {
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    StringBuilder bloomTypeConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            bloomTypeConfigValue.append('&');
        }
        bloomTypeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        bloomTypeConfigValue.append('=');
        // BUGFIX: the original called toString() *before* its null check, so a
        // null bloom filter type would throw NPE and the DEFAULT_BLOOMFILTER
        // fallback was unreachable. Check the value itself first.
        String bloomType;
        if (familyDescriptor.getBloomFilterType() == null) {
            bloomType = HColumnDescriptor.DEFAULT_BLOOMFILTER;
        } else {
            bloomType = familyDescriptor.getBloomFilterType().toString();
        }
        bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
    }
    conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
}
 
Example 7
Project: hbase   File: TestRegionReplicaFailover.java    License: Apache License 2.0 6 votes vote down vote up
@Before
public void before() throws Exception {
  Configuration conf = HTU.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  // Enable region replica replication and wait for the primary flush so
  // secondary replicas can observe data written by the tests.
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, true);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);

  HTU.startMiniCluster(NB_SERVERS);
  // Table name = test method name minus its last 3 characters — presumably a
  // numeric suffix convention of these tests; confirm against the test names.
  htd = HTU.createTableDescriptor(
    TableName.valueOf(name.getMethodName().substring(0, name.getMethodName().length()-3)),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  // Three replicas per region: one primary plus two secondaries.
  htd.setRegionReplication(3);
  HTU.getAdmin().createTable(htd);
}
 
Example 8
Project: aws-big-data-blog   File: HBaseUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Helper method to create an HBase table in an Amazon EMR cluster with HBase installed.
 * Creates fixed column families: user, address, contact, likes. No-op when the
 * table is already available.
 *
 * @param tableName - name for table to create
 * @param dnsId - Amazon EMR master node public DNS
 * @param hbaseRestPort - HBase Rest port
 */
public static void createTable(String tableName, String dnsId, int hbaseRestPort) {
	Configuration config = HBaseConfiguration.create();
	RemoteAdmin admin = new RemoteAdmin(new Client(new Cluster().add(dnsId, hbaseRestPort)), config);
	String[] families = {"user", "address", "contact", "likes"};
	try {
		if (admin.isTableAvailable(tableName)) {
			LOG.info("table already exists!");
			return;
		}
		HTableDescriptor tableDesc = new HTableDescriptor(tableName);
		for (String family : families) {
			tableDesc.addFamily(new HColumnDescriptor(family));
		}
		admin.createTable(tableDesc);
		isTableAvailable = true;
		LOG.info("create table " + tableName + " ok.");
	} catch (IOException e) {
		// BUGFIX: log the exception itself (message + stack trace) instead of
		// e.getCause(), which is frequently null and drops the real error.
		LOG.error("Failed to create table " + tableName, e);
	}
}
 
Example 9
Project: phoenix   File: AlterTableIT.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that ALTER TABLE ... ADD with a new family prefix (CF.) creates a
 * second HBase column family alongside Phoenix's default family "0", and that
 * both keep the default TTL.
 */
@Test
public void testAddColumnForNewColumnFamily() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE testAddColumnForNewColumnFamily (\n"
            +"ID1 VARCHAR(15) NOT NULL,\n"
            +"ID2 VARCHAR(15) NOT NULL,\n"
            +"CREATED_DATE DATE,\n"
            +"CREATION_TIME BIGINT,\n"
            +"LAST_USED DATE,\n"
            +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) SALT_BUCKETS = 8";
    // BUGFIX: close the JDBC connection — the original leaked conn1 whenever
    // a DDL statement or assertion threw.
    try (Connection conn1 = DriverManager.getConnection(getUrl(), props)) {
        conn1.createStatement().execute(ddl);
        ddl = "ALTER TABLE testAddColumnForNewColumnFamily ADD CF.STRING VARCHAR";
        conn1.createStatement().execute(ddl);
        try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes("testAddColumnForNewColumnFamily".toUpperCase())).getColumnFamilies();
            assertEquals(2, columnFamilies.length);
            // "0" is Phoenix's default family; "CF" is the newly added one.
            assertEquals("0", columnFamilies[0].getNameAsString());
            assertEquals(HColumnDescriptor.DEFAULT_TTL, columnFamilies[0].getTimeToLive());
            assertEquals("CF", columnFamilies[1].getNameAsString());
            assertEquals(HColumnDescriptor.DEFAULT_TTL, columnFamilies[1].getTimeToLive());
        }
    }
}
 
Example 10
Project: kylin-on-parquet-v2   File: RemoteDictionaryStore.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Initializes the backing HBase table for this dictionary store, creating it
 * (with one column family per entry in {@code cfs}) if it does not exist, and
 * keeps an open Table handle for later use.
 *
 * @param cfs column family names to create the table with
 * @throws IOException on HBase connection or admin failures
 */
public void init(String[] cfs) throws IOException {
    logger.debug("Checking streaming remote store for {} at {}.", tableName, String.join(", ", cfs));
    Connection conn = getConnection();
    Admin admin = conn.getAdmin();
    // Build the descriptor up front with one family per requested name.
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(hbaseTableName));
    for (String family : cfs) {
        HColumnDescriptor fd = new HColumnDescriptor(family);
        desc.addFamily(fd);
    }
    // Serialize table creation across processes so concurrent initializers
    // do not race on createTable.
    DistributedLock lock = KylinConfig.getInstanceFromEnv().getDistributedLockFactory().lockForCurrentProcess();
    try {
        boolean locked = lock.lock(lockPath());
        // Create only when we hold the lock and the table is still absent.
        if (locked && !admin.tableExists(TableName.valueOf(hbaseTableName))) {
            logger.info("Create htable with {}.", desc);
            admin.createTable(desc);
        } else {
            logger.info("Table exists or cannot fetch lock {}", desc);
        }
    } finally {
        admin.close();
        // NOTE(review): lock is dereferenced above without a null check, so
        // the null guard here is purely defensive; unlock only if we own it.
        if (lock != null && lock.isLockedByMe(lockPath())) {
            lock.unlock(lockPath());
        }
    }
    // The connection comes from getConnection() and is intentionally left
    // open here (the table handle below depends on it) — TODO confirm owner.
    table = conn.getTable(TableName.valueOf(hbaseTableName));
}
 
Example 11
Project: BigData-In-Practice   File: HBaseUtil.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create an HBase table.
 *
 * @param tableName table name
 * @param cfs       array of column family names
 * @return true if the table was created; false if it already exists or
 *         creation failed
 */
public static boolean createTable(String tableName, String[] cfs) {
    try (HBaseAdmin admin = (HBaseAdmin) HBaseConn.getHBaseConn().getAdmin()) {
        if (admin.tableExists(tableName)) {
            return false;
        }
        HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
        Arrays.stream(cfs).forEach(cf -> {
            HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf);
            columnDescriptor.setMaxVersions(1); // keep only the newest cell version
            tableDescriptor.addFamily(columnDescriptor);
        });
        admin.createTable(tableDescriptor);
        return true;
    } catch (Exception e) {
        e.printStackTrace();
        // BUGFIX: the original fell through to "return true" even when
        // createTable threw, reporting success on failure.
        return false;
    }
}
 
Example 12
Project: cloud-bigtable-examples   File: BigtableHelloWorld.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the sample table with a single column family and returns a status
 * string describing the outcome.
 */
public static String create(Connection connection) {
  try {
      // The admin API lets us create, manage and delete tables
    Admin admin = connection.getAdmin();
    // [END connecting_to_bigtable]

    // [START creating_a_table]
    // Create a table with a single column family
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
    descriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY_NAME));

    admin.createTable(descriptor);
    // [END creating_a_table]
  } catch (IOException e) {
    // NOTE(review): every IOException is reported as "table exists", which can
    // mask connection or permission failures — presumably acceptable for this
    // sample, but confirm.
    return "Table exists.";
  }
  return "Create table " + Bytes.toString(TABLE_NAME);
}
 
Example 13
Project: flink-learning   File: Main.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Writes one event string into HBase, keyed by the current time in millis.
 * Creates the target table (with the INFO_STREAM family) if it is absent.
 *
 * @param string        payload stored under column "test"
 * @param parameterTool source of the HBase client settings
 * @throws IOException on any HBase connection or write failure
 */
private static void writeEventToHbase(String string, ParameterTool parameterTool) throws IOException {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
    configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
    configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
    configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
    configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));

    // BUGFIX: the original leaked the connection, admin, and table whenever an
    // HBase call threw before the explicit close() lines at the bottom.
    try (Connection connect = ConnectionFactory.createConnection(configuration);
         Admin admin = connect.getAdmin()) {
        // Create the table if it does not exist yet.
        if (!admin.tableExists(HBASE_TABLE_NAME)) {
            admin.createTable(new HTableDescriptor(HBASE_TABLE_NAME).addFamily(new HColumnDescriptor(INFO_STREAM)));
        }
        try (Table table = connect.getTable(HBASE_TABLE_NAME)) {
            TimeStamp ts = new TimeStamp(new Date());
            Date date = ts.getDate();
            Put put = new Put(Bytes.toBytes(date.getTime()));
            put.addColumn(Bytes.toBytes(INFO_STREAM), Bytes.toBytes("test"), Bytes.toBytes(string));
            table.put(put);
        }
    }
}
 
Example 14
Project: bigdata-tutorial   File: HBaseSimpleDemo.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a table with one column family.
 *
 * @param tableName  table to create
 * @param familyName single column family for the new table
 * @return false if the table already exists, true after successful creation
 * @throws Exception on any HBase admin failure
 */
public Boolean createTable(String tableName, String familyName) throws Exception {
	// BUGFIX: close the admin in finally — the original leaked it whenever
	// tableExists/createTable threw before the explicit close() calls.
	HBaseAdmin admin = new HBaseAdmin(hconn);
	try {
		if (admin.tableExists(tableName)) {
			LOGGER.warn(">>>> Table {} exists!", tableName);
			return false;
		}
		HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
		tableDesc.addFamily(new HColumnDescriptor(familyName));
		admin.createTable(tableDesc);
		LOGGER.info(">>>> Table {} create success!", tableName);
		return true;
	} finally {
		admin.close();
	}
}
 
Example 15
Project: hbase-indexer   File: IndexerDryRunTest.java    License: Apache License 2.0 6 votes vote down vote up
@Before
public void setUp() throws Exception {
    // Create the table under test with a single column family.
    HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TEST_TABLE_NAME));
    tableDescriptor.addFamily(new HColumnDescriptor(TEST_COLFAM_NAME));
    HBASE_ADMIN.createTable(tableDescriptor);

    recordTable = HBASE_ADMIN.getConnection().getTable(TableName.valueOf(TEST_TABLE_NAME));

    // Indexing options wired to the embedded Solr/ZooKeeper test fixtures.
    opts = new HBaseIndexingOptions(new Configuration());
    opts.zkHost = SOLR_TEST_UTILITY.getZkConnectString();
    opts.hbaseTableName = Bytes.toString(TEST_TABLE_NAME);
    opts.hbaseIndexerConfigFile = new File(Resources.getResource(getClass(), "user_indexer.xml").toURI());
    opts.collection = "collection1";
    // Minimal topology: one shard, one reducer keeps the dry run fast.
    opts.shards = 1;
    opts.reducers = 1;
    opts.fanout = Integer.MAX_VALUE;
    // Dry-run mode: index without writing to Solr (what this test verifies).
    opts.isDryRun = true;

    opts.updateConflictResolver = RetainMostRecentUpdateConflictResolver.class.getName();
    opts.isVerbose = true;

    opts.hBaseAdmin = HBASE_ADMIN;
}
 
Example 16
Project: antsdb   File: Helper.java    License: GNU Lesser General Public License v3.0 6 votes vote down vote up
/**
 * (Re)creates the given table: any existing table with the same name is
 * dropped first, then a fresh one is created with a single data column
 * family using the requested compression algorithm.
 */
public static void createTable(Connection conn, String namespace, String tableName, Algorithm compressionType) {
    // Start from a clean slate: remove any pre-existing table of this name.
    if (Helper.existsTable(conn, namespace, tableName)) {
        Helper.dropTable(conn, namespace, tableName);
    }
    if (!Helper.existsTable(conn, namespace, tableName)) {
        try (Admin admin = conn.getAdmin()) {
            HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(namespace, tableName));
            descriptor.addFamily(new HColumnDescriptor(DATA_COLUMN_FAMILY).setCompressionType(compressionType));
            _log.debug("creating table {}", descriptor.toString());
            admin.createTable(descriptor);
        } catch (Exception ex) {
            throw new OrcaHBaseException(ex, "Failed to create table - " + tableName);
        }
    }
}
 
Example 17
Project: codes-scratch-crawler   File: HBaseTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the crawler log table with its fixed set of column families,
 * if it does not already exist.
 *
 * @param tableName table to create
 * @throws IOException on HBase admin failures
 */
public static void createTable(String tableName) throws IOException {
  // BUGFIX: the original never closed the HBaseAdmin, leaking its resources.
  HBaseAdmin admin = new HBaseAdmin(conf);
  try {
    if (!admin.tableExists(tableName)) {
      HTableDescriptor tableDesc = new HTableDescriptor(tableName);
      tableDesc.addFamily(new HColumnDescriptor("ip:"));
      tableDesc.addFamily(new HColumnDescriptor("time:"));
      tableDesc.addFamily(new HColumnDescriptor("type:"));
      tableDesc.addFamily(new HColumnDescriptor("cookie:"));
      // Column family "c:" is the one used below to illustrate column storage.
      tableDesc.addFamily(new HColumnDescriptor("c:"));
      admin.createTable(tableDesc);
      System.out.println("table create ok!");
    } else {
      System.out.println("table already exists!");
    }
  } finally {
    admin.close();
  }
}
 
Example 18
Project: phoenix   File: AlterTableIT.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that ALTER TABLE properties without a family prefix are applied to
 * the DEFAULT_COLUMN_FAMILY (XYZ) while other families stay untouched.
 */
@Test
public void testSetPropertyAndAddColumnUsingDefaultColumnFamilySpecifier() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String ddl = "CREATE TABLE SETPROPDEFCF " +
            "  (a_string varchar not null, col1 integer, CF1.col2 integer" +
            "  CONSTRAINT pk PRIMARY KEY (a_string)) DEFAULT_COLUMN_FAMILY = 'XYZ'\n";
    try {
        conn.createStatement().execute(ddl);
        // Both alterations target the default family XYZ — one explicitly,
        // one through the default-family resolution.
        conn.createStatement().execute("ALTER TABLE SETPROPDEFCF ADD col4 integer XYZ.REPLICATION_SCOPE=1 ");
        conn.createStatement().execute("ALTER TABLE SETPROPDEFCF ADD XYZ.col5 integer IN_MEMORY=true ");
        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HColumnDescriptor[] families = admin.getTableDescriptor(Bytes.toBytes("SETPROPDEFCF")).getColumnFamilies();
            assertEquals(2, families.length);
            // CF1 keeps defaults; XYZ carries both altered properties.
            assertEquals("CF1", families[0].getNameAsString());
            assertFalse(families[0].isInMemory());
            assertEquals(0, families[0].getScope());
            assertEquals("XYZ", families[1].getNameAsString());
            assertTrue(families[1].isInMemory());
            assertEquals(1, families[1].getScope());
        }
    } finally {
        conn.close();
    }
}
 
Example 19
Project: phoenix-omid   File: TestSnapshotFilter.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the table with the given families (each keeping MAX_VERSIONS
 * versions) and the Omid snapshot-filter + aggregation coprocessors, unless
 * it already exists. Waits briefly for the table to become available.
 *
 * @param tableName table to create
 * @param families  column family names
 * @throws IOException on HBase admin failures
 */
private void createTableIfNotExists(String tableName, byte[]... families) throws IOException {
    if (!admin.tableExists(TableName.valueOf(tableName))) {
        LOG.info("Creating {} table...", tableName);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        for (byte[] family : families) {
            HColumnDescriptor datafam = new HColumnDescriptor(family);
            datafam.setMaxVersions(MAX_VERSIONS);
            desc.addFamily(datafam);
        }

        // Coprocessors run in priority order; the snapshot filter goes first.
        int priority = Coprocessor.PRIORITY_HIGHEST;

        desc.addCoprocessor(OmidSnapshotFilter.class.getName(), null, ++priority, null);
        desc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation", null, ++priority, null);

        admin.createTable(desc);
        try {
            hbaseTestUtil.waitTableAvailable(TableName.valueOf(tableName), 5000);
        } catch (InterruptedException e) {
            // BUGFIX: restore the thread's interrupt status instead of
            // swallowing the interrupt with printStackTrace().
            Thread.currentThread().interrupt();
        }
    }
}
 
Example 20
Project: phoenix-omid   File: TestSnapshotFilterLL.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the table with the given families (each keeping MAX_VERSIONS
 * versions) and the Omid snapshot-filter + aggregation coprocessors, unless
 * it already exists. Waits briefly for the table to become available.
 *
 * @param tableName table to create
 * @param families  column family names
 * @throws IOException on HBase admin failures
 */
private void createTableIfNotExists(String tableName, byte[]... families) throws IOException {
    if (!admin.tableExists(TableName.valueOf(tableName))) {
        LOG.info("Creating {} table...", tableName);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        for (byte[] family : families) {
            HColumnDescriptor datafam = new HColumnDescriptor(family);
            datafam.setMaxVersions(MAX_VERSIONS);
            desc.addFamily(datafam);
        }

        // Coprocessors run in priority order; the snapshot filter goes first.
        int priority = Coprocessor.PRIORITY_HIGHEST;

        desc.addCoprocessor(OmidSnapshotFilter.class.getName(), null, ++priority, null);
        desc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation", null, ++priority, null);

        admin.createTable(desc);
        try {
            hbaseTestUtil.waitTableAvailable(TableName.valueOf(tableName), 5000);
        } catch (InterruptedException e) {
            // BUGFIX: restore the thread's interrupt status instead of
            // swallowing the interrupt with printStackTrace().
            Thread.currentThread().interrupt();
        }
    }
}
 
Example 21
Project: phoenix-tephra   File: HBaseTransactionPruningPlugin.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create the prune state table given the {@link TableName} if the table doesn't exist already.
 * Creation races with other clients are tolerated: a TableExistsException is
 * treated the same as the table already existing.
 *
 * @param stateTable prune state table name
 */
protected void createPruneTable(TableName stateTable) throws IOException {
  try (Admin admin = this.connection.getAdmin()) {
    if (admin.tableExists(stateTable)) {
      LOG.debug("Not creating pruneStateTable {} since it already exists.",
                stateTable.getNameWithNamespaceInclAsString());
      return;
    }

    // Single family, single version: the state table only needs latest values.
    HTableDescriptor tableDescriptor = new HTableDescriptor(stateTable);
    HColumnDescriptor family = new HColumnDescriptor(DataJanitorState.FAMILY);
    family.setMaxVersions(1);
    tableDescriptor.addFamily(family);
    admin.createTable(tableDescriptor);
    LOG.info("Created pruneTable {}", stateTable.getNameWithNamespaceInclAsString());
  } catch (TableExistsException ex) {
    // Expected if the prune state table is being created at the same time by another client
    LOG.debug("Not creating pruneStateTable {} since it already exists.",
              stateTable.getNameWithNamespaceInclAsString(), ex);
  }
}
 
Example 22
Project: java-docs-samples   File: HelloWorldTest.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void beforeClass() {
  projectId = requireEnv("GOOGLE_CLOUD_PROJECT");
  instanceId = requireEnv(INSTANCE_ENV);
  // try-with-resources closes the Bigtable connection after seeding the data.
  try (Connection connection = BigtableConfiguration.connect(projectId, instanceId)) {
    Admin admin = connection.getAdmin();
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(TABLE_ID));
    descriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY_NAME));
    admin.createTable(descriptor);

    Table table = connection.getTable(TableName.valueOf(Bytes.toBytes(TABLE_ID)));

    // Seed one row so the HelloWorld read paths have data to find.
    String rowKey = "phone#4c410523#20190401";
    Put put = new Put(Bytes.toBytes(rowKey));

    put.addColumn(
        Bytes.toBytes(COLUMN_FAMILY_NAME), Bytes.toBytes("os_name"), Bytes.toBytes("android"));
    table.put(put);

  } catch (Exception e) {
    // NOTE(review): setup failures are only printed, so the tests would run
    // against missing fixtures — confirm this best-effort setup is intended.
    System.out.println("Error during beforeClass: \n" + e.toString());
  }
}
 
Example 23
Project: java-docs-samples   File: BigtableHelloWorld.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create a table -- first time only.
 * Builds a table with a single column family and returns a status string.
 * @param connection to Bigtable
 * @return the status
 */
public static String create(Connection connection) {
  try {
    // The admin API lets us create, manage and delete tables
    Admin admin = connection.getAdmin();

    // Create a table with a single column family
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
    descriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY_NAME));

    admin.createTable(descriptor);
  } catch (IOException e) {
    // NOTE(review): every IOException is reported as "Table exists." — this
    // masks auth/connectivity failures; presumably fine for a sample, confirm.
    return "Table exists.";
  }
  return "Create table " + Bytes.toString(TABLE_NAME);
}
 
Example 24
Project: phoenix-tephra   File: AbstractHBaseTableTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a table whose families keep all versions and carry the transaction
 * TTL property, installs the given coprocessors in list order, and returns an
 * open handle to the newly available table.
 */
protected static Table createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor familyDescriptor = new HColumnDescriptor(family);
    familyDescriptor.setMaxVersions(Integer.MAX_VALUE);
    familyDescriptor.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    tableDescriptor.addFamily(familyDescriptor);
  }
  if (existingData) {
    // Mark the table as containing pre-transactional data.
    tableDescriptor.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Increment per coprocessor (rather than adding an offset) so the priority
  // value cannot overflow; list order is the invocation order.
  int priority = Coprocessor.PRIORITY_USER;
  for (String coprocessor : coprocessors) {
    tableDescriptor.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(tableDescriptor);
  testUtil.waitTableAvailable(tableName, 5000);
  return testUtil.getConnection().getTable(TableName.valueOf(tableName));
}
 
Example 25
Project: terrapin   File: OnlineOfflineStateModelFactory.java    License: Apache License 2.0 6 votes vote down vote up
@Transition(from = "OFFLINE", to = "ONLINE")
public void onBecomeOnlineFromOffline(Message message,
                                      NotificationContext context) {
  // Resolve the HDFS path and partition number encoded in the Helix message.
  Pair<String, String> hdfsPathAndPartition = getHdfsPathAndPartitionNum(message);
  String hdfsPath = hdfsPathAndPartition.getLeft();
  LOG.info("Opening " + hdfsPath);
  try {
    // TODO(varun): Maybe retry here.
    // The family descriptor only exists to carry the block-cache flag into
    // the reader's CacheConfig.
    HColumnDescriptor family = new HColumnDescriptor(Constants.HFILE_COLUMN_FAMILY);
    family.setBlockCacheEnabled(isBlockCacheEnabled);
    Reader r = readerFactory.createHFileReader(hdfsPath, new CacheConfig(conf, family));
    resourcePartitionMap.addReader(
        message.getResourceName(), hdfsPathAndPartition.getRight(), r);
  } catch (IOException e) {
    // Rethrow unchecked so Helix marks the state transition as failed.
    throw new RuntimeException(e);
  }
}
 
Example 26
Project: phoenix-tephra   File: HBaseTransactionPruningPlugin.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create the prune state table given the {@link TableName} if the table doesn't exist already.
 *
 * @param stateTable prune state table name
 */
protected void createPruneTable(TableName stateTable) throws IOException {
  try (Admin admin = this.connection.getAdmin()) {
    if (admin.tableExists(stateTable)) {
      LOG.debug("Not creating pruneStateTable {}:{} since it already exists.",
                stateTable.getNamespaceAsString(), stateTable.getNameAsString());
      return;
    }

    HTableDescriptor htd = new HTableDescriptor(stateTable);
    htd.addFamily(new HColumnDescriptor(DataJanitorState.FAMILY).setMaxVersions(1));
    admin.createTable(htd);
    LOG.info("Created pruneTable {}:{}", stateTable.getNamespaceAsString(), stateTable.getNameAsString());
  } catch (TableExistsException ex) {
    // Expected if the prune state table is being created at the same time by another client
    LOG.debug("Not creating pruneStateTable {}:{} since it already exists.",
              stateTable.getNamespaceAsString(), stateTable.getNameAsString(), ex);
  }
}
 
Example 27
Project: phoenix-tephra   File: HBaseTransactionPruningPlugin.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create the prune state table given the {@link TableName} if the table doesn't exist already.
 *
 * @param stateTable prune state table name
 */
protected void createPruneTable(TableName stateTable) throws IOException {
  try (Admin admin = this.connection.getAdmin()) {
    if (admin.tableExists(stateTable)) {
      LOG.debug("Not creating pruneStateTable {} since it already exists.",
                stateTable.getNameWithNamespaceInclAsString());
      return;
    }

    HTableDescriptor htd = new HTableDescriptor(stateTable);
    htd.addFamily(new HColumnDescriptor(DataJanitorState.FAMILY).setMaxVersions(1));
    admin.createTable(htd);
    LOG.info("Created pruneTable {}", stateTable.getNameWithNamespaceInclAsString());
  } catch (TableExistsException ex) {
    // Expected if the prune state table is being created at the same time by another client
    LOG.debug("Not creating pruneStateTable {} since it already exists.",
              stateTable.getNameWithNamespaceInclAsString(), ex);
  }
}
 
Example 28
Project: pinpoint   File: HBaseAdminTemplate.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void addColumn(TableName tableName, HColumnDescriptor hcd) {
    execute(admin -> {
        admin.addColumn(tableName, hcd);
        logger.info("{} table added column : {}", tableName, hcd);
        return null;
    });
}
 
Example 29
Project: flink-learning   File: HBaseStreamWriteMain.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Opens the HBase connection for this sink subtask, creating the target table
 * (with the configured column family) if it does not exist yet.
 *
 * @param taskNumber index of this parallel subtask
 * @param numTasks   total parallelism (unused)
 * @throws IOException on HBase connection/admin failures
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    connection = ConnectionFactory.createConnection(configuration);
    TableName tableName = TableName.valueOf(ExecutionEnvUtil.PARAMETER_TOOL.get(HBASE_TABLE_NAME));
    Admin admin = connection.getAdmin();
    // Create the table if it does not exist yet.
    if (!admin.tableExists(tableName)) {
        log.info("==============不存在表 = {}", tableName);
        // FIX: reuse the already-parsed tableName instead of re-reading and
        // re-parsing the configuration value a second time.
        admin.createTable(new HTableDescriptor(tableName)
                .addFamily(new HColumnDescriptor(ExecutionEnvUtil.PARAMETER_TOOL.get(HBASE_COLUMN_NAME))));
    }
    table = connection.getTable(tableName);

    this.taskNumber = String.valueOf(taskNumber);
}
 
Example 30
Project: kylin-on-parquet-v2   File: ITAclTableMigrationToolTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a table with the given column families, failing if it already
 * exists. (Method name "creatTable" (sic) kept for caller compatibility.)
 */
private void creatTable(Admin admin, Configuration conf, TableName tableName, String[] family) throws IOException {
    if (admin.tableExists(tableName)) {
        throw new IOException("Table : " + tableName + " exists");
    }
    HTableDescriptor descriptor = new HTableDescriptor(tableName);
    for (String columnFamily : family) {
        descriptor.addFamily(new HColumnDescriptor(columnFamily));
    }
    admin.createTable(descriptor);
    logger.info("create table Success!");
}
 
Example 31
Project: phoenix   File: AlterTableIT.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that adding a column to a brand-new family (NEWCF) with an
 * HColumnDescriptor property (IN_MEMORY) applies the property only to that
 * family, leaving the default family (XYZ) untouched, on a table whose
 * columns are all part of the primary key.
 */
@Test
public void testSetHColumnPropertyAndAddColumnForNewCFForTableWithOnlyPKCols() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    try {
        // PK-only table with an explicit default column family.
        String ddl = "create table IF NOT EXISTS SETHCPROPADDNEWCFCOLPKONLY ("
                + " id char(1) NOT NULL,"
                + " col1 integer NOT NULL,"
                + " col2 bigint NOT NULL,"
                + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
                + " ) TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'";
        conn.createStatement().execute(ddl);

        ddl = "ALTER TABLE SETHCPROPADDNEWCFCOLPKONLY ADD NEWCF.COL3 INTEGER IN_MEMORY=true";
        conn.createStatement().execute(ddl);
        conn.commit();

        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("SETHCPROPADDNEWCFCOLPKONLY"));
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            // IN_MEMORY must land on NEWCF only; XYZ keeps the default (false).
            assertEquals(2, columnFamilies.length);
            assertEquals("NEWCF", columnFamilies[0].getNameAsString());
            assertEquals(true, columnFamilies[0].isInMemory());
            assertEquals("XYZ", columnFamilies[1].getNameAsString());
            assertEquals(false, columnFamilies[1].isInMemory());
        }
    } finally {
        conn.close();
    }
}
 
Example 32
Project: hbase   File: ThriftUtilities.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Converts an HBase {@code HColumnDescriptor} into the equivalent Thrift
 * {@code ColumnDescriptor} struct, copying each family-level attribute.
 *
 * @param in Hbase HColumnDescriptor object
 * @return Thrift ColumnDescriptor
 */
static public ColumnDescriptor colDescFromHbase(HColumnDescriptor in) {
  ColumnDescriptor out = new ColumnDescriptor();
  // Thrift names carry the trailing family delimiter appended to the raw name.
  out.name = ByteBuffer.wrap(Bytes.add(in.getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY));
  out.maxVersions = in.getMaxVersions();
  out.compression = in.getCompressionType().toString();
  out.inMemory = in.isInMemory();
  out.blockCacheEnabled = in.isBlockCacheEnabled();
  out.bloomFilterType = in.getBloomFilterType().toString();
  out.timeToLive = in.getTimeToLive();
  return out;
}
 
Example 33
/**
 * Create simple HTD with three families: 'a', 'b', and 'c'
 * @param tableName name of the table descriptor
 * @return a new descriptor for {@code tableName} containing families 'a', 'b' and 'c'
 */
private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  // Add the three fixed families in order, rather than repeating the pair of
  // statements three times.
  for (String family : new String[] { "a", "b", "c" }) {
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes(family)));
  }
  return htd;
}
 
Example 34
Project: xxhadoop   File: HBaseTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates {@code tableName} with one column family per name in {@code familyNames}.
 * Logs and returns without error if the table already exists.
 *
 * @param tableName   name of the table to create
 * @param familyNames column family names to attach to the new table
 * @throws IOException if the existence check or table creation fails
 */
public void createTable(String tableName, String... familyNames) throws IOException {
	// Parse the table name once and reuse it (the original parsed it twice and
	// called the redundant String.toString() in the log messages).
	TableName table = TableName.valueOf(tableName);
	if (admin.tableExists(table)) {
		LOGGER.info("TABLE " + tableName + " ALREADY EXIST");
		return;
	}
	HTableDescriptor desc = new HTableDescriptor(table);
	for (String familyName : familyNames) {
		desc.addFamily(new HColumnDescriptor(familyName));
	}
	admin.createTable(desc);
	LOGGER.info("TABLE " + tableName + " CREATED");
}
 
Example 35
Project: hbase   File: TestRegionReplicaReplicationEndpoint.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Exercises region replica replication: creates a table with
 * {@code regionReplication} replicas plus a control table with no replicas,
 * loads rows into both, and verifies the rows are replicated to the
 * secondary regions of the replicated table.
 *
 * @param regionReplication number of region replicas for the table under test
 * @throws Exception on any setup, load, or verification failure
 */
public void testRegionReplicaReplication(int regionReplication) throws Exception {
  // test region replica replication. Create a table with single region, write some data
  // ensure that data is replicated to the secondary region
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationWithReplicas_"
      + regionReplication);
  HTableDescriptor htd = HTU.createTableDescriptor(TableName.valueOf(tableName.toString()),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);
  TableName tableNameNoReplicas =
      TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
  HTU.deleteTableIfAny(tableNameNoReplicas);
  HTU.createTable(tableNameNoReplicas, HBaseTestingUtility.fam1);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  try {
    // try-with-resources closes both tables even when getTable() or a load
    // throws; the original leaked the connection if the first getTable() failed.
    try (Table table = connection.getTable(tableName);
        Table tableNoReplicas = connection.getTable(tableNameNoReplicas)) {
      // load some data to the non-replicated table
      HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtility.fam1, 6000, 7000);

      // load the data to the table
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

      verifyReplication(tableName, regionReplication, 0, 1000);
    } finally {
      // Drop the control table after the Table handles are closed, as before.
      HTU.deleteTableIfAny(tableNameNoReplicas);
    }
  } finally {
    connection.close();
  }
}
 
Example 36
Project: java-study   File: HBaseUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Create a table. NOTE: if a table with the same name already exists it is
 * disabled, deleted, and recreated from scratch.
 *
 * @param tableName
 *            name of the table to create
 * @param columnFamily
 *            column family names to attach to the table
 */
public static void creatTable(String tableName, String[] columnFamily) {
	// Nothing to do without a table name or at least one column family.
	if(null==tableName||tableName.length()==0){
		return;
	}
	if(null==columnFamily||columnFamily.length==0){
		return;
	}
	// Build the table name object
	TableName tn = TableName.valueOf(tableName);
	// a. Check whether the table already exists
	try {
		// Obtain an Admin session
		admin = getConnection().getAdmin();
		if (admin.tableExists(tn)) {
			System.out.println(tableName + " 表存在,删除表....");
			// Disable the table first so it can be deleted
			admin.disableTable(tn);
			// Delete the table
			admin.deleteTable(tn);
			System.out.println("表删除成功.....");
		}
		// Build the table descriptor
		HTableDescriptor htd = new HTableDescriptor(tn);
		for (String str : columnFamily) {
			// Build a column family descriptor for each requested family
			HColumnDescriptor hcd = new HColumnDescriptor(str);
			htd.addFamily(hcd);
		}
		// Create the table
		admin.createTable(htd);
		System.out.println(tableName + " 表创建成功!");
	} catch (IOException e) {
		// NOTE(review): failures are only printed, not rethrown — callers cannot
		// tell whether creation succeeded; confirm this best-effort behavior is intended.
		e.printStackTrace();
	} finally {
		close();
	}
}
 
Example 37
Project: PoseidonX   File: HTableOperatorImpl.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Translates a project-level {@code ColumnDescriptor} into an HBase
 * {@code HColumnDescriptor} carrying the same family name.
 *
 * <p>When {@code cd.isCompress()} is set, only the <em>compaction</em>
 * compression algorithm is switched to GZ. NOTE(review): if the intent is to
 * compress the family's stored data as well, {@code setCompressionType} may
 * also be required — confirm against the callers.
 */
private HColumnDescriptor changeCd(ColumnDescriptor cd){
    HColumnDescriptor family = new HColumnDescriptor(cd.getFamilyName());
    if(cd.isCompress()){
        // GZ applied to compactions only (see note above).
        family.setCompactionCompressionType(Compression.Algorithm.GZ);
    }
    return family ;
}
 
Example 38
Project: phoenix   File: AlterTableIT.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testSetHColumnPropertyAndAddColumnForDefaultCFForTableWithOnlyPKCols() throws Exception {
    // A PK-only table with DEFAULT_COLUMN_FAMILY='XYZ': adding a column with an
    // HColumnDescriptor property (IN_MEMORY) and no explicit family should apply
    // the property to the default family.
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    try {
        String sql = "create table IF NOT EXISTS SETHCPROPADDCOLPKONLY ("
                + " id char(1) NOT NULL,"
                + " col1 integer NOT NULL,"
                + " col2 bigint NOT NULL,"
                + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
                + " ) TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'";
        conn.createStatement().execute(sql);
        sql = "ALTER TABLE SETHCPROPADDCOLPKONLY ADD COL3 INTEGER IN_MEMORY=true";
        conn.createStatement().execute(sql);
        conn.commit();
        // Inspect the physical HBase table to verify the family-level property.
        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("SETHCPROPADDCOLPKONLY"));
            HColumnDescriptor[] families = tableDesc.getColumnFamilies();
            // Only the default family should exist, now marked in-memory.
            assertEquals(1, families.length);
            assertEquals(true, families[0].isInMemory());
            assertEquals("XYZ", families[0].getNameAsString());
        }
    } finally {
        conn.close();
    }
}
 
Example 39
Project: cloud-bigtable-examples   File: HBaseCLI.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Lists tables (optionally filtered by a name pattern) and prints each table
 * name followed by its column family names in angle brackets.
 *
 * @param connection open HBase connection to obtain an Admin from
 * @param args       zero args (list all) or one arg (table-name pattern)
 * @throws InvalidArgsException if more than one argument is supplied
 * @throws IOException          if listing tables fails
 */
public void run(Connection connection, List<String> args) throws InvalidArgsException, IOException {
    String pattern = null;
    if (args.size() == 1) {
        pattern = args.get(0);
    } else if (args.size() != 0) {
        throw new InvalidArgsException(args);
    }

    Admin admin = connection.getAdmin();
    HTableDescriptor[] tables;

    // We use the listTables() method on the Admin instance
    // to get a list of HTableDescriptor objects.
    if (pattern != null) {
        tables = admin.listTables(pattern);
    } else {
        tables = admin.listTables();
    }

    // For each of the tables we get the table name and column families
    // registered with the table, and print them out.
    for (HTableDescriptor table : tables) {
        // Join the family names with commas. The original appended a trailing
        // comma and then called substring(0, length()) — a no-op that left the
        // comma in the output; build a separator-correct string instead.
        StringBuilder familyNames = new StringBuilder();
        for (HColumnDescriptor columnFamily : table.getColumnFamilies()) {
            if (familyNames.length() > 0) {
                familyNames.append(',');
            }
            familyNames.append(columnFamily.getNameAsString());
        }
        String suffix = familyNames.length() > 0 ? " <" + familyNames + ">" : "";
        System.out.println(table.getTableName() + suffix);
    }
}
 
Example 40
Project: bigdata-tutorial   File: HBaseUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Logs a table's name followed by each of its column family names.
 *
 * @param table table whose descriptor is inspected; not closed by this method
 */
public static void printTableInfo(HTableInterface table) {
	try {
		HTableDescriptor descriptor = table.getTableDescriptor();
		LOGGER.info(">>>> Print Table {} Desc", new String(table.getTableName()));
		for (HColumnDescriptor family : descriptor.getColumnFamilies()) {
			LOGGER.info(">>>> family column: {}", family.getNameAsString());
		}
	} catch (Exception ex) {
		// Best-effort diagnostics: log and continue rather than propagate.
		LOGGER.error("printTable info Error:", ex);
	}
}