org.apache.hadoop.hbase.HTableDescriptor Java Examples

The following examples show how to use org.apache.hadoop.hbase.HTableDescriptor. Each example is taken from an open-source project; the source file and the project it comes from are noted above the code.
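
Before the project examples, here is a minimal, self-contained sketch of the typical workflow: build an HTableDescriptor for a table, add its column families as HColumnDescriptor instances, and pass the descriptor to Admin.createTable. This follows the pre-2.0 client API used by most of the examples below; the table name example_table and column family cf are placeholders, not names taken from any example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HTableDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml / hbase-default.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Describe the table: one column family that keeps up to three versions.
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example_table"));
      HColumnDescriptor family = new HColumnDescriptor("cf");
      family.setMaxVersions(3);
      desc.addFamily(family);
      if (!admin.tableExists(desc.getTableName())) {
        admin.createTable(desc);
      }
    }
  }
}
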
Example #1
Source File: TestConstraints.java    From hbase with Apache License 2.0
@SuppressWarnings("unchecked")
@Test
public void testSimpleReadWrite() throws Throwable {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  Constraints.add(desc, WorksConstraint.class);

  List<? extends Constraint> constraints = Constraints.getConstraints(desc,
      this.getClass().getClassLoader());
  assertEquals(1, constraints.size());

  assertEquals(WorksConstraint.class, constraints.get(0).getClass());

  // Check that we can add more than 1 constraint and that ordering is
  // preserved
  Constraints.add(desc, AlsoWorks.class, NameConstraint.class);
  constraints = Constraints.getConstraints(desc, this.getClass()
      .getClassLoader());
  assertEquals(3, constraints.size());

  assertEquals(WorksConstraint.class, constraints.get(0).getClass());
  assertEquals(AlsoWorks.class, constraints.get(1).getClass());
  assertEquals(NameConstraint.class, constraints.get(2).getClass());

}
 
Example #2
Source File: HBaseTransactionPruningPlugin.java    From phoenix-tephra with Apache License 2.0
protected SortedSet<byte[]> getTransactionalRegions() throws IOException {
  SortedSet<byte[]> regions = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  try (Admin admin = connection.getAdmin()) {
    HTableDescriptor[] tableDescriptors = admin.listTables();
    LOG.debug("Got {} tables to process", tableDescriptors == null ? 0 : tableDescriptors.length);
    if (tableDescriptors != null) {
      for (HTableDescriptor tableDescriptor : tableDescriptors) {
        if (isTransactionalTable(tableDescriptor)) {
          List<HRegionInfo> tableRegions = admin.getTableRegions(tableDescriptor.getTableName());
          LOG.debug("Regions for table {}: {}", tableDescriptor.getTableName(), tableRegions);
          if (tableRegions != null) {
            for (HRegionInfo region : tableRegions) {
              regions.add(region.getRegionName());
            }
          }
        } else {
          LOG.debug("{} is not a transactional table", tableDescriptor.getTableName());
        }
      }
    }
  }
  return regions;
}
 
Example #3
Source File: HBaseTransactionPruningPlugin.java    From phoenix-tephra with Apache License 2.0
/**
 * Create the prune state table given the {@link TableName} if the table doesn't exist already.
 *
 * @param stateTable prune state table name
 */
protected void createPruneTable(TableName stateTable) throws IOException {
  try (Admin admin = this.connection.getAdmin()) {
    if (admin.tableExists(stateTable)) {
      LOG.debug("Not creating pruneStateTable {} since it already exists.",
                stateTable.getNameWithNamespaceInclAsString());
      return;
    }

    HTableDescriptor htd = new HTableDescriptor(stateTable);
    htd.addFamily(new HColumnDescriptor(DataJanitorState.FAMILY).setMaxVersions(1));
    admin.createTable(htd);
    LOG.info("Created pruneTable {}", stateTable.getNameWithNamespaceInclAsString());
  } catch (TableExistsException ex) {
    // Expected if the prune state table is being created at the same time by another client
    LOG.debug("Not creating pruneStateTable {} since it already exists.",
              stateTable.getNameWithNamespaceInclAsString(), ex);
  }
}
 
Example #4
Source File: AbstractHBaseTableTest.java    From phoenix-tephra with Apache License 2.0
protected static Table createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    desc.addFamily(columnDesc);
  }
  if (existingData) {
    desc.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Divide individually to prevent any overflow
  int priority = Coprocessor.PRIORITY_USER;
  // order in list is the same order that coprocessors will be invoked
  for (String coprocessor : coprocessors) {
    desc.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return testUtil.getConnection().getTable(TableName.valueOf(tableName));
}
 
Example #5
Source File: ConnectionQueryServicesImpl.java    From phoenix with Apache License 2.0
@Override
public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
    HTableInterface htable = getTable(tableName);
    try {
        return htable.getTableDescriptor();
    } catch (IOException e) {
        if(e instanceof org.apache.hadoop.hbase.TableNotFoundException ||
            e.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException) {
          byte[][] schemaAndTableName = new byte[2][];
          SchemaUtil.getVarChars(tableName, schemaAndTableName);
          throw new TableNotFoundException(Bytes.toString(schemaAndTableName[0]), Bytes.toString(schemaAndTableName[1]));
        }
        throw new RuntimeException(e);
    } finally {
        Closeables.closeQuietly(htable);
    }
}
 
Example #6
Source File: HBaseCleaner.java    From DistributedCrawler with Apache License 2.0
public static void clean(String tableName) throws IOException{
	HBaseAdmin admin = new HBaseAdmin(HBasePool.getInstance().getConf());
	admin.disableTable(tableName);
	admin.deleteTable(tableName);
	/*for(HTableDescriptor d : admin.listTables()){
		System.out.println(d.getNameAsString());
		for(HColumnDescriptor cd : d.getColumnFamilies()){
			System.out.println("===="+cd.getNameAsString());
		}
	}*/
	HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
	tableDescriptor.addFamily(new HColumnDescriptor("content"));
	tableDescriptor.addFamily(new HColumnDescriptor("title"));
	admin.createTable(tableDescriptor);
	admin.close();
}
 
Example #7
Source File: IndexLoadBalancerIT.java    From phoenix with Apache License 2.0
private void createUserAndIndexTable(TableName tableName, TableName indexTableName)
        throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("cf"));
    char c = 'A';
    byte[][] split = new byte[20][];
    for (int i = 0; i < 20; i++) {
        byte[] b = { (byte) c };
        split[i] = b;
        c++;
    }
    admin.createTable(htd, split);
    HTableDescriptor iHtd = new HTableDescriptor(indexTableName);
    iHtd.addFamily(new HColumnDescriptor("cf"));
    iHtd.setValue(IndexLoadBalancer.PARENT_TABLE_KEY, tableName.toBytes());
    admin.createTable(iHtd, split);
}
 
Example #8
Source File: HBaseBasedAuditRepository.java    From incubator-atlas with Apache License 2.0
private void createTableIfNotExists() throws AtlasException {
    Admin admin = null;
    try {
        admin = connection.getAdmin();
        LOG.info("Checking if table {} exists", tableName.getNameAsString());
        if (!admin.tableExists(tableName)) {
            LOG.info("Creating table {}", tableName.getNameAsString());
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY);
            columnFamily.setMaxVersions(1);
            columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            columnFamily.setCompressionType(Compression.Algorithm.GZ);
            columnFamily.setBloomFilterType(BloomType.ROW);
            tableDescriptor.addFamily(columnFamily);
            admin.createTable(tableDescriptor);
        } else {
            LOG.info("Table {} exists", tableName.getNameAsString());
        }
    } catch (IOException e) {
        throw new AtlasException(e);
    } finally {
        close(admin);
    }
}
 
Example #9
Source File: VisibilityController.java    From hbase with Apache License 2.0
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getConnection(), LABELS_TABLE_NAME)) {
      TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
        new TableDescriptorBuilder.ModifyableTableDescriptor(LABELS_TABLE_NAME);
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(LABELS_TABLE_FAMILY);
      familyDescriptor.setBloomFilterType(BloomType.NONE);
      // We will cache all the labels. No need for the normal table block cache.
      familyDescriptor.setBlockCacheEnabled(false);
      tableDescriptor.setColumnFamily(familyDescriptor);
      // Keep the "labels" table to a single region always; we are not expecting too many labels
      // in the system.
      tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) {
        admin.createTable(tableDescriptor);
      }
    }
  }
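
The VisibilityController example above already relies on the newer builder API (TableDescriptorBuilder and ColumnFamilyDescriptorBuilder), keeping HTableDescriptor only for constants such as SPLIT_POLICY. As a hedged sketch, the same kind of single-region, block-cache-disabled table could be described purely through the builders; the table name example_labels and family f below are placeholders:

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.util.Bytes;

public class BuilderStyleDescriptorSketch {
  // Describes a small lookup table that should stay in a single region and
  // whose only family does not use the block cache.
  public static TableDescriptor singleRegionTable() {
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("f"))
        .setBlockCacheEnabled(false)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_labels"))
        .setColumnFamily(family)
        .setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName())
        .build();
  }
}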
 
Example #10
Source File: HFileOutputFormat3.java    From kylin-on-parquet-v2 with Apache License 2.0
/**
 * Serialize the column family to block size map into the configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws UnsupportedEncodingException
 *           if the family names or block sizes cannot be URL-encoded
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
        throws UnsupportedEncodingException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            blockSizeConfigValue.append('&');
        }
        blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        blockSizeConfigValue.append('=');
        blockSizeConfigValue.append(URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    // Persist the serialized map (entries are already '&'-delimited with no trailing separator)
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
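
The value that configureBlockSize above stores under BLOCK_SIZE_FAMILIES_CONF_KEY is a list of URL-encoded family=blockSize pairs joined by '&'. Below is a small sketch of reading such a value back on the consuming side; the class and method names are illustrative, not part of the HFileOutputFormat3 API.

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.Map;

public class BlockSizeConfigSketch {
  // Parses values of the form "cf1=65536&cf2=131072" produced by configureBlockSize.
  static Map<String, Integer> parseBlockSizes(String configValue) throws UnsupportedEncodingException {
    Map<String, Integer> blockSizes = new HashMap<>();
    if (configValue == null || configValue.isEmpty()) {
      return blockSizes;
    }
    for (String pair : configValue.split("&")) {
      String[] parts = pair.split("=", 2);
      blockSizes.put(URLDecoder.decode(parts[0], "UTF-8"),
                     Integer.valueOf(URLDecoder.decode(parts[1], "UTF-8")));
    }
    return blockSizes;
  }
}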
 
Example #11
Source File: HelloWorldTest.java    From java-docs-samples with Apache License 2.0
@BeforeClass
public static void beforeClass() {
  projectId = requireEnv("GOOGLE_CLOUD_PROJECT");
  instanceId = requireEnv(INSTANCE_ENV);
  try (Connection connection = BigtableConfiguration.connect(projectId, instanceId)) {
    Admin admin = connection.getAdmin();
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(TABLE_ID));
    descriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY_NAME));
    admin.createTable(descriptor);

    Table table = connection.getTable(TableName.valueOf(Bytes.toBytes(TABLE_ID)));

    String rowKey = "phone#4c410523#20190401";
    Put put = new Put(Bytes.toBytes(rowKey));

    put.addColumn(
        Bytes.toBytes(COLUMN_FAMILY_NAME), Bytes.toBytes("os_name"), Bytes.toBytes("android"));
    table.put(put);

  } catch (Exception e) {
    System.out.println("Error during beforeClass: \n" + e.toString());
  }
}
 
Example #12
Source File: TestHBaseTimestampStorage.java    From phoenix-omid with Apache License 2.0
@BeforeMethod
public void setUp() throws Exception {
    HBaseAdmin admin = testutil.getHBaseAdmin();

    if (!admin.tableExists(TableName.valueOf(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME))) {
        HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
        HColumnDescriptor datafam = new HColumnDescriptor(DEFAULT_TIMESTAMP_STORAGE_CF_NAME);
        datafam.setMaxVersions(Integer.MAX_VALUE);
        desc.addFamily(datafam);

        admin.createTable(desc);
    }

    if (admin.isTableDisabled(TableName.valueOf(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME))) {
        admin.enableTable(TableName.valueOf(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME));
    }
    HTableDescriptor[] tables = admin.listTables();
    for (HTableDescriptor t : tables) {
        LOG.info(t.getNameAsString());
    }
}
 
Example #13
Source File: DstClusterUtil.java    From kylin with Apache License 2.0
public boolean checkExist(TableName htableName, CubeSegment segment) throws IOException {
    if (!htableExists(htableName)) {
        return false;
    }
    Table table = hbaseConn.getTable(htableName);
    HTableDescriptor tableDesc = table.getTableDescriptor();
    if (segment.toString().equals(tableDesc.getValue(HTableSegmentTag))) {
        if (hbaseAdmin.isTableEnabled(htableName)) {
            return true;
        } else {
            hbaseAdmin.deleteTable(htableName);
            logger.info("htable {} is deleted", htableName);
            return false;
        }
    }
    throw new RuntimeException(
            "htable name " + htableName + " has been used by " + tableDesc.getValue(HTableSegmentTag));
}
 
Example #14
Source File: Describe.java    From examples with Apache License 2.0
public static void main(String[] args) throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  // Instantiate default HBase configuration object.
  // Configuration file must be in the classpath
  Configuration conf = HBaseConfiguration.create();
  // tag::DESCRIBE
  HBaseAdmin admin = new HBaseAdmin(conf);
  HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf("crc"));
  Collection<HColumnDescriptor> families = desc.getFamilies();
  System.out.println("Table " + desc.getTableName() + " has " + families.size() + " family(ies)");
  for (Iterator<HColumnDescriptor> iterator = families.iterator(); iterator.hasNext();) {
    HColumnDescriptor family = iterator.next();
    System.out.println("Family details: " + family);
  }
  // end::DESCRIBE
  admin.close();
}
 
Example #15
Source File: HbaseTemplate.java    From canal-1.1.3 with Apache License 2.0
public void createTable(String tableName, String... familyNames) {
    try (HBaseAdmin admin = (HBaseAdmin) getConnection().getAdmin()) {

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
        // Add the column families
        if (familyNames != null) {
            for (String familyName : familyNames) {
                HColumnDescriptor hcd = new HColumnDescriptor(familyName);
                desc.addFamily(hcd);
            }
        }
        admin.createTable(desc);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
 
Example #16
Source File: RemoteDictionaryStore.java    From kylin-on-parquet-v2 with Apache License 2.0
public void init(String[] cfs) throws IOException {
    logger.debug("Checking streaming remote store for {} at {}.", tableName, String.join(", ", cfs));
    Connection conn = getConnection();
    Admin admin = conn.getAdmin();
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(hbaseTableName));
    for (String family : cfs) {
        HColumnDescriptor fd = new HColumnDescriptor(family);
        desc.addFamily(fd);
    }
    DistributedLock lock = KylinConfig.getInstanceFromEnv().getDistributedLockFactory().lockForCurrentProcess();
    try {
        boolean locked = lock.lock(lockPath());
        if (locked && !admin.tableExists(TableName.valueOf(hbaseTableName))) {
            logger.info("Create htable with {}.", desc);
            admin.createTable(desc);
        } else {
            logger.info("Table exists or cannot fetch lock {}", desc);
        }
    } finally {
        admin.close();
        if (lock != null && lock.isLockedByMe(lockPath())) {
            lock.unlock(lockPath());
        }
    }
    table = conn.getTable(TableName.valueOf(hbaseTableName));
}
 
Example #17
Source File: HBaseAdminWrapperListTableTest.java    From hbase-tools with Apache License 2.0
@Test
public void testListTablesAsRWUser() throws Exception {
    createAdditionalTable(tableName + "2");

    final HTableDescriptor[] hTableDescriptors;
    if (securedCluster) {
        PrivilegedExceptionAction listTables = new PrivilegedExceptionAction() {
            @Override
            public Object run() throws Exception {
                return admin.listTables();
            }
        };
        hTableDescriptors = (HTableDescriptor[]) USER_RW.runAs(listTables);
    } else {
        hTableDescriptors = admin.listTables();
    }

    int tableCount = 0;
    for (HTableDescriptor hTableDescriptor : hTableDescriptors) {
        System.out.println(hTableDescriptor);
        tableCount++;
    }

    if (miniCluster) {
        Assert.assertEquals(2, tableCount);
    }
}
 
Example #18
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
@Override
public void start(CoprocessorEnvironment e) throws IOException {
  if (e instanceof RegionCoprocessorEnvironment) {
    RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
    this.cacheSupplier = getTransactionStateCacheSupplier(env);
    this.cache = cacheSupplier.get();

    HTableDescriptor tableDesc = env.getRegion().getTableDesc();
    for (HColumnDescriptor columnDesc : tableDesc.getFamilies()) {
      String columnTTL = columnDesc.getValue(TxConstants.PROPERTY_TTL);
      long ttl = 0;
      if (columnTTL != null) {
        try {
          ttl = Long.parseLong(columnTTL);
          LOG.info("Family " + columnDesc.getNameAsString() + " has TTL of " + columnTTL);
        } catch (NumberFormatException nfe) {
          LOG.warn("Invalid TTL value configured for column family " + columnDesc.getNameAsString() +
                     ", value = " + columnTTL);
        }
      }
      ttlByFamily.put(columnDesc.getName(), ttl);
    }

    this.allowEmptyValues = getAllowEmptyValues(env, tableDesc);
    this.txMaxLifetimeMillis = getTxMaxLifetimeMillis(env);
    this.readNonTxnData = Boolean.valueOf(tableDesc.getValue(TxConstants.READ_NON_TX_DATA));
    if (readNonTxnData) {
      LOG.info("Reading pre-existing data enabled for table " + tableDesc.getNameAsString());
    }
    initializePruneState(env);
  }
}
 
Example #19
Source File: HBaseResourceStore.java    From kylin with Apache License 2.0
@Override
protected String createMetaStoreUUID() throws IOException {
    try (final Admin hbaseAdmin = HBaseConnection.get(metadataUrl).getAdmin()) {
        final String metaStoreName = metadataUrl.getIdentifier();
        final HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(metaStoreName));
        String uuid = desc.getValue(HBaseConnection.HTABLE_UUID_TAG);
        if (uuid != null)
            return uuid;
        return UUID.randomUUID().toString();
    } catch (Exception e) {
        return null;
    }
}
 
Example #20
Source File: HtdHbaseSchemaVerifier.java    From pinpoint with Apache License 2.0
private boolean verifySchema(HTableDescriptor expected, HTableDescriptor actual) {
    if (!expected.getTableName().equals(actual.getTableName())) {
        return false;
    }
    for (HColumnDescriptor expectedHcd : expected.getFamilies()) {
        if (!actual.hasFamily(expectedHcd.getName())) {
            return false;
        }
    }
    return true;
}
 
Example #21
Source File: HBaseDDLHandlerTest.java    From bigdata-tutorial with Apache License 2.0
public static void main(String[] args) throws Exception {
	String quorum = "192.168.0.30,192.168.0.31,192.168.0.32";
	//quorum = "192.168.8.191,192.168.1.192,192.168.1.193";
	int port = 2181;
	String znode = "/hyperbase1";
	HBaseConnPool connPool = new HBaseClientManager(quorum, port, znode);
	HBaseDDLHandler ddlHandler = new HBaseDDLHandler(connPool);

	String tableName = "demo_test";
	System.out.println("=============================== : delete");
	ddlHandler.deleteTable(tableName);

	String columnFamily = "cf";
	System.out.println("=============================== : create");
	ddlHandler.createTable(tableName, columnFamily, "cf2");

	System.out.println("=============================== : desc");
	HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
	System.out.println("=============================== : alter");
	HBaseAdmin admin = new HBaseAdmin(connPool.getConn());
	admin.disableTable(tableName);
	HTableInterface htable = ddlHandler.getTable(tableName);
	HTableDescriptor tableDesc = admin.getTableDescriptor(htable.getTableName());
	tableDesc.removeFamily(Bytes.toBytes("cf2"));
	HColumnDescriptor newhcd = new HColumnDescriptor("cf3");
	newhcd.setMaxVersions(2);
	newhcd.setKeepDeletedCells(KeepDeletedCells.TRUE);
	tableDesc.addFamily(newhcd);

	admin.modifyTable(tableName, tableDesc);
	admin.enableTable(tableName);
	admin.close();

	System.out.println("=============================== : desc");
	HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
	System.out.println("=============================== : delete");
	ddlHandler.deleteTable(tableName);

	connPool.closeConn();
}
 
Example #22
Source File: CommonHBaseConnection.java    From pentaho-hadoop-shims with Apache License 2.0
@Override
public void createTable( String tableName, List<String> colFamilyNames, Properties creationProps ) throws Exception {
  checkConfiguration();

  HTableDescriptor tableDescription = m_factory.getHBaseTableDescriptor( tableName );

  for ( String familyName : colFamilyNames ) {
    HColumnDescriptor c = new HColumnDescriptor( familyName );
    configureColumnDescriptor( c, creationProps );
    tableDescription.getClass().getMethod( "addFamily", HColumnDescriptor.class ).invoke( tableDescription, c );
  }

  m_admin.createTable( tableDescription );
}
 
Example #23
Source File: HbaseSchemaServiceImpl.java    From pinpoint with Apache License 2.0
private boolean initFromExistingTables(String namespace, String compression, List<ChangeSet> changeSets, List<HTableDescriptor> currentHtds) {
    logger.info("[{}] Initializing hbase schema from existing tables.", namespace);

    // Replay change sets one by one and compare it against the current hbase schema.
    // If they match, all change sets up to that point are seen as already applied.
    HbaseSchemaCommandManager initCommandManager = new HbaseSchemaCommandManager(namespace, compression);
    List<ChangeSet> appliedChangeSets = new ArrayList<>();
    List<ChangeSet> changeSetsToApply = new ArrayList<>();
    for (ChangeSet changeSet : changeSets) {
        initCommandManager.applyChangeSet(changeSet);
        changeSetsToApply.add(changeSet);
        if (hbaseSchemaVerifier.verifySchemas(initCommandManager.getSchemaSnapshot(), currentHtds)) {
            appliedChangeSets.addAll(changeSetsToApply);
            changeSetsToApply = new ArrayList<>();
        }
    }

    if (appliedChangeSets.isEmpty()) {
        logger.info("[{}] Current table schema does not match any schema from the change sets.", namespace);
    } else {
        List<String> appliedChangeSetIds = appliedChangeSets.stream().map(ChangeSet::getId).collect(Collectors.toList());
        logger.info("[{}] Change sets already applied : {}", namespace, appliedChangeSetIds);
    }

    List<SchemaChangeLog> executedLogs = schemaChangeLogService.recordChangeSets(namespace, appliedChangeSets);
    if (changeSetsToApply.isEmpty()) {
        logger.info("[{}] Hbase schema already at latest version.", namespace);
        return false;
    }

    HbaseSchemaCommandManager updateCommandManager = new HbaseSchemaCommandManager(namespace, compression, currentHtds);
    return applyChangeSets(updateCommandManager, changeSetsToApply, executedLogs);
}
 
Example #24
Source File: AlterTableIT.java    From phoenix with Apache License 2.0
@Test
public void testSettingNotHColumnNorPhoenixPropertyEndsUpAsHTableProperty() throws Exception {
	Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
	Connection conn = DriverManager.getConnection(getUrl(), props);
	try {
		String ddl = "create table IF NOT EXISTS RANDMONPROPTABLE ("
				+ " id char(1) NOT NULL,"
				+ " col1 integer NOT NULL,"
				+ " col2 bigint NOT NULL,"
				+ " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
				+ " )";
		conn.createStatement().execute(ddl);
		ddl = "ALTER TABLE RANDMONPROPTABLE ADD NEWCF.COL3 INTEGER NEWCF.UNKNOWN_PROP='ABC'";
		try {
			conn.createStatement().execute(ddl);
			fail();
		} catch (SQLException e) {
			assertEquals(SQLExceptionCode.CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN.getErrorCode(), e.getErrorCode());
		}
		ddl = "ALTER TABLE RANDMONPROPTABLE SET UNKNOWN_PROP='ABC'";
		conn.createStatement().execute(ddl);
		try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
			HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("RANDMONPROPTABLE"));
			assertEquals("ABC", tableDesc.getValue("UNKNOWN_PROP"));
		}
	} finally {
		conn.close();
	}
}
 
Example #25
Source File: UpgradeUtil.java    From phoenix with Apache License 2.0
private static void createSequenceSnapshot(HBaseAdmin admin, PhoenixConnection conn) throws SQLException {
    byte[] tableName = getSequenceSnapshotName();
    HColumnDescriptor columnDesc = new HColumnDescriptor(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES);
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(columnDesc);
    try {
        admin.createTable(desc);
        copyTable(conn, PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES, tableName);
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    }
}
 
Example #26
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion updateTtl(HRegion region, byte[] family, long ttl) throws Exception {
  region.close();
  HTableDescriptor htd = region.getTableDesc();
  HColumnDescriptor cfd = htd.getFamily(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  return HRegion.openHRegion(region.getRegionInfo(), htd, region.getWAL(), conf,
                             new LocalRegionServerServices(conf, ServerName.valueOf(
                               InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())), null);
}
 
Example #27
Source File: CubeMigrationCLI.java    From Kylin with Apache License 2.0
private static void undo(Opt opt) throws IOException, InterruptedException {
    logger.info("Undo operation: " + opt.toString());

    switch (opt.type) {
    case CHANGE_HTABLE_HOST: {
        String tableName = (String) opt.params[0];
        HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
        hbaseAdmin.disableTable(tableName);
        desc.setValue(IRealizationConstants.HTableTag, srcConfig.getMetadataUrlPrefix());
        hbaseAdmin.modifyTable(tableName, desc);
        hbaseAdmin.enableTable(tableName);
        break;
    }
    case COPY_FILE_IN_META: {
        // no harm
        logger.info("Undo for COPY_FILE_IN_META is ignored");
        break;
    }
    case COPY_DICT_OR_SNAPSHOT: {
        // no harm
        logger.info("Undo for COPY_DICT_OR_SNAPSHOT is ignored");
        break;
    }
    case RENAME_FOLDER_IN_HDFS: {
        String srcPath = (String) opt.params[1];
        String dstPath = (String) opt.params[0];

        if (hdfsFS.exists(new Path(srcPath)) && !hdfsFS.exists(new Path(dstPath))) {
            hdfsFS.rename(new Path(srcPath), new Path(dstPath));
            logger.info("HDFS Folder renamed from " + srcPath + " to " + dstPath);
        }
        break;
    }
    case ADD_INTO_PROJECT: {
        logger.info("Undo for ADD_INTO_PROJECT is ignored");
        break;
    }
    }
}
 
Example #28
Source File: AlterTableTest.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Test
public void testAlterColumnFamilyProperty() throws Exception {

    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    
    String ddl = "CREATE TABLE test_table " +
            "  (a_string varchar not null, col1 integer" +
            "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
    try {
            conn.createStatement().execute(ddl);
          
            conn.createStatement().execute("ALTER TABLE TEST_TABLE ADD col2 integer IN_MEMORY=true");
            
            HTableInterface htable1 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("TEST_TABLE")); 
            HTableDescriptor htableDesciptor1 = htable1.getTableDescriptor();
            HColumnDescriptor hcolumnDescriptor1 = htableDesciptor1.getFamily(Bytes.toBytes("_0"));
            assertTrue(hcolumnDescriptor1.isInMemory());
           
            try {
                
                conn.createStatement().execute("ALTER TABLE TEST_TABLE SET IN_MEMORY=false");
                fail("Should have caught exception.");
                
            } catch (SQLException e) {
                assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1025 (42Y84): Unsupported property set in ALTER TABLE command."));
            } 
    } finally {
        conn.close();
    }
 }
 
Example #29
Source File: BalanceBooks.java    From phoenix-tephra with Apache License 2.0
protected void createTableIfNotExists(Configuration conf, byte[] tableName, byte[][] columnFamilies)
    throws IOException {
  try (Admin admin = this.conn.getAdmin()) {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : columnFamilies) {
      HColumnDescriptor columnDesc = new HColumnDescriptor(family);
      columnDesc.setMaxVersions(Integer.MAX_VALUE);
      desc.addFamily(columnDesc);
    }
    desc.addCoprocessor(TransactionProcessor.class.getName());
    admin.createTable(desc);
  }
}