Java Code Examples for org.apache.hadoop.hbase.client.TableDescriptorBuilder#addColumnFamily()

The following examples show how to use org.apache.hadoop.hbase.client.TableDescriptorBuilder#addColumnFamily(). You can go to the original project or source file by following the links above each example.
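Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: build a ColumnFamilyDescriptor, attach it to a TableDescriptorBuilder via addColumnFamily(), and create the table from the built TableDescriptor. The table name "demo" and family name "d" are hypothetical placeholders, not from any project below. Note that later HBase 2.x releases deprecate addColumnFamily() in favor of setColumnFamily().

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AddColumnFamilyExample {

  // Creates a hypothetical "demo" table with a single column family "d".
  static void createDemoTable(Admin admin) throws IOException {
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("d")) // family name (placeholder)
        .setMaxVersions(1)
        .build();
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"));
    builder.addColumnFamily(family); // attach the family to the table descriptor
    TableDescriptor desc = builder.build();
    admin.createTable(desc); // create the table through the HBase Admin API
  }
}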
Example 1
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion updateTtl(HRegion region, byte[] family, long ttl) throws Exception {
  // Close the region so its descriptor can be swapped out
  region.close();
  TableDescriptorBuilder tableBuilder =
      TableDescriptorBuilder.newBuilder(region.getTableDescriptor());
  ColumnFamilyDescriptorBuilder cfd =
      ColumnFamilyDescriptorBuilder.newBuilder(tableBuilder.build().getColumnFamily(family));
  if (ttl > 0) {
    cfd.setValue(Bytes.toBytes(TxConstants.PROPERTY_TTL), Bytes.toBytes(String.valueOf(ttl)));
  }
  cfd.setMaxVersions(10);
  // Replace the old family descriptor with the updated one
  tableBuilder.removeColumnFamily(family);
  tableBuilder.addColumnFamily(cfd.build());
  // Reopen the region with the modified table descriptor
  return HRegion
      .openHRegion(region.getRegionInfo(), tableBuilder.build(), region.getWAL(), conf,
        new LocalRegionServerServices(conf, ServerName
            .valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())),
        null);
}
 
Example 2
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java    From phoenix with Apache License 2.0
/**
 * Create a simple HTD (table descriptor) with three families: 'a', 'b', and 'c'
 * @param tableName name of the table
 * @return the built table descriptor
 */
private TableDescriptor createBasic3FamilyHTD(final String tableName) {
  TableDescriptorBuilder tableBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
  ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("a"));
  tableBuilder.addColumnFamily(a);
  ColumnFamilyDescriptor b = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("b"));
  tableBuilder.addColumnFamily(b);
  ColumnFamilyDescriptor c = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("c"));
  tableBuilder.addColumnFamily(c);
  return tableBuilder.build();
}
 
Example 3
Source File: DropMetadataIT.java    From phoenix with Apache License 2.0
@Test
public void testDropViewKeepsHTable() throws Exception {
    Connection conn = getConnection();
    Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    String hbaseNativeViewName = generateUniqueName();

    byte[] hbaseNativeBytes = SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, hbaseNativeViewName);
    try {
        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(hbaseNativeBytes));
        ColumnFamilyDescriptor columnDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_NAME)
                .setKeepDeletedCells(KeepDeletedCells.TRUE).build();
        builder.addColumnFamily(columnDescriptor);
        admin.createTable(builder.build());
    } finally {
        admin.close();
    }
    
    conn.createStatement().execute("create view " + hbaseNativeViewName+
            "   (uint_key unsigned_int not null," +
            "    ulong_key unsigned_long not null," +
            "    string_key varchar not null,\n" +
            "    \"1\".uint_col unsigned_int," +
            "    \"1\".ulong_col unsigned_long" +
            "    CONSTRAINT pk PRIMARY KEY (uint_key, ulong_key, string_key))\n" +
            ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'");
    conn.createStatement().execute("drop view " + hbaseNativeViewName);
    conn.close();
}
 
Example 4
Source File: TestIndexManagementUtil.java    From phoenix with Apache License 2.0
/**
 * @param admin admin used to create the table
 * @param indexBuilder table descriptor builder to update before creating the table
 */
public static void createIndexTable(Admin admin, TableDescriptorBuilder indexBuilder) throws IOException {
  indexBuilder.addColumnFamily(
      ColumnFamilyDescriptorBuilder.newBuilder(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY)
          .setKeepDeletedCells(KeepDeletedCells.TRUE).build());
  admin.createTable(indexBuilder.build());
}
 
Example 5
Source File: DynamicColumnIT.java    From phoenix with Apache License 2.0
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (Admin admin = services.getAdmin()) {
            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_A));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_B));
            admin.createTable(builder.build());
        }

        try (Table hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");

            Put put = new Put(key);
            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.addColumn(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.addColumn(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.addColumn(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.addColumn(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);

            hTable.batch(mutations, null);

            // Create Phoenix table after HBase table was created through the native APIs
            // The timestamp of the table creation must be later than the timestamp of the data
            pconn.createStatement().execute("create table " + tableName + 
            "   (entry varchar not null," +
            "    F varchar," +
            "    A.F1v1 varchar," +
            "    A.F1v2 varchar," +
            "    B.F2v1 varchar" +
            "    CONSTRAINT pk PRIMARY KEY (entry)) COLUMN_ENCODED_BYTES=NONE");
        }

    }
}
 
Example 6
Source File: QueryDatabaseMetaDataIT.java    From phoenix with Apache License 2.0
@Test
public void testCreateOnExistingTable() throws Exception {
    try (PhoenixConnection pconn =
            DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        String tableName = generateUniqueName(); // MDTEST_NAME
        String schemaName = ""; // MDTEST_SCHEMA_NAME
        byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
        byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
        byte[] cfC = Bytes.toBytes("c");
        byte[][] familyNames = new byte[][] { cfB, cfC };
        byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
        Admin admin = pconn.getQueryServices().getAdmin();
        try {
            admin.disableTable(TableName.valueOf(htableName));
            admin.deleteTable(TableName.valueOf(htableName));
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            // ignore: the table did not exist yet
        }

        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
        for (byte[] familyName : familyNames) {
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
        }
        admin.createTable(builder.build());
        createMDTestTable(pconn, tableName,
            "a." + ColumnFamilyDescriptorBuilder.BLOCKSIZE + "=" + 50000);

        TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf(htableName));
        assertEquals(3, descriptor.getColumnFamilies().length);
        ColumnFamilyDescriptor cdA = descriptor.getColumnFamily(cfA);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
        assertNotEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdA.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using WITH
        assertEquals(1, cdA.getMaxVersions()); // Overridden using WITH
        ColumnFamilyDescriptor cdB = descriptor.getColumnFamily(cfB);
        // Allow KEEP_DELETED_CELLS to be false for VIEW
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdB.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // Should keep the original value.
        // CF c should stay the same since it's not a Phoenix cf.
        ColumnFamilyDescriptor cdC = descriptor.getColumnFamily(cfC);
        assertNotNull("Column family not found", cdC);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdC.getBlocksize());
        assertNotEquals(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING, cdC.getDataBlockEncoding());
        assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
        admin.close();

        int rowCount = 5;
        String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
        PreparedStatement ps = pconn.prepareStatement(upsert);
        for (int i = 0; i < rowCount; i++) {
            ps.setString(1, Integer.toString(i));
            ps.setInt(2, i + 1);
            ps.setInt(3, i + 2);
            ps.execute();
        }
        pconn.commit();
        String query = "SELECT count(1) FROM " + tableName;
        ResultSet rs = pconn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals(rowCount, rs.getLong(1));

        query = "SELECT id, col1,col2 FROM " + tableName;
        rs = pconn.createStatement().executeQuery(query);
        for (int i = 0; i < rowCount; i++) {
            assertTrue(rs.next());
            assertEquals(Integer.toString(i), rs.getString(1));
            assertEquals(i + 1, rs.getInt(2));
            assertEquals(i + 2, rs.getInt(3));
        }
        assertFalse(rs.next());
    }
}
 
Example 7
Source File: FailForUnsupportedHBaseVersionsIT.java    From phoenix with Apache License 2.0
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. For 'completeness', this test needs to run against both a version of HBase that
 * does not support WAL Compression and one that does. Currently, the unsupported version is
 * the default one (0.94.4), so just running 'mvn test' will run the full test. However, this
 * test will not fail when running against a version of HBase with WAL Compression enabled.
 * Therefore, to fully exercise this functionality, we need to run the test against both a
 * supported and an unsupported version of HBase (as long as we want to support a version of
 * HBase that doesn't support custom WAL Codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000 /* 5 mins */)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    IndexTestingUtils.setupConfig(conf);
    // enable WAL Compression
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);

    // check whether this HBase version is supported by the indexer
    String version = VersionInfo.getVersion();
    boolean supported = Indexer.validateVersion(version, conf) == null;

    // start the minicluster
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    util.startMiniCluster();

    try {
        // setup the primary table
        TableDescriptorBuilder descBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(
                "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion"));
        byte[] family = Bytes.toBytes("f");
        
        descBuilder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
        TableDescriptor desc = descBuilder.build();
        // enable indexing to a non-existent index table
        String indexTableName = "INDEX_TABLE";
        ColumnGroup fam1 = new ColumnGroup(indexTableName);
        fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
        builder.addIndexGroup(fam1);
        builder.build(desc);

        // get a reference to the regionserver, so we can ensure it aborts
        HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);

        // create the primary table
        Admin admin = util.getAdmin();
        if (supported) {
            admin.createTable(desc);
            assertFalse("Hosting regeion server failed, even the HBase version (" + version
                    + ") supports WAL Compression.", server.isAborted());
        } else {
            admin.createTableAsync(desc, null);

            // wait for the regionserver to abort - if this doesn't occur in the timeout, assume it's
            // broken.
            while (!server.isAborted()) {
                LOGGER.debug("Waiting on regionserver to abort..");
            }
        }

    } finally {
        // cleanup
        util.shutdownMiniCluster();
    }
}