Java Code Examples for org.apache.hadoop.hbase.client.HBaseAdmin#deleteTable()

The following examples show how to use org.apache.hadoop.hbase.client.HBaseAdmin#deleteTable(). Each example is taken from a real project; the header above each one names the project and source file it comes from.
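All of the examples follow the same basic sequence: an HBase table must be disabled before HBaseAdmin#deleteTable() will accept it, so the code checks whether the table exists, disables it if necessary, deletes it, and finally closes the admin. The sketch below is a minimal illustration of that idiom against the same pre-1.0 HBaseAdmin API used throughout the examples; the class name DeleteTableSketch and the table name "my_table" are placeholders rather than names taken from any project shown here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class DeleteTableSketch {

    /**
     * Drops a table if it exists. HBase requires the table to be disabled
     * before deleteTable() is called; deleting an enabled table throws
     * TableNotDisabledException.
     */
    public static void dropTableIfExists(Configuration conf, String tableName) throws IOException {
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
            if (admin.tableExists(tableName)) {
                if (admin.isTableEnabled(tableName)) {
                    admin.disableTable(tableName);
                }
                admin.deleteTable(tableName);
            }
        } finally {
            // Always release the admin connection, even if the delete fails.
            admin.close();
        }
    }

    public static void main(String[] args) throws IOException {
        // "my_table" is a placeholder table name.
        dropTableIfExists(HBaseConfiguration.create(), "my_table");
    }
}

In HBase 1.0 and later the HBaseAdmin constructor is deprecated; the equivalent flow obtains an Admin from ConnectionFactory.createConnection(conf).getAdmin() and passes TableName.valueOf(tableName) instead of raw strings, but the disable-then-delete sequence is unchanged.
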
Example 1
Source File: HbaseServiceImpl.java    From searchanalytics-bigdata with MIT License
@Override
public void removeAll() {
	LOG.debug("Setting up searchclicks table!");
	String tableName = "searchclicks";
	try {
		HBaseAdmin hBaseAdmin = new HBaseAdmin(miniHBaseCluster.getConf());
		hBaseAdmin.disableTable(tableName);
		hBaseAdmin.deleteTable(tableName);
		hBaseAdmin.close();
	} catch (IOException e) {
		throw new RuntimeException(e);
	}
	setupSearchEventsTable();
	LOG.debug("Setting up searchclicks table done!");
}
 
Example 2
Source File: BaseTest.java    From phoenix with Apache License 2.0
/**
 * Disable and drop all the tables except SYSTEM.CATALOG and SYSTEM.SEQUENCE
 */
private static void disableAndDropNonSystemTables() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin();
    try {
        HTableDescriptor[] tables = admin.listTables();
        for (HTableDescriptor table : tables) {
            String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getName());
            if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) {
                admin.disableTable(table.getName());
                admin.deleteTable(table.getName());
            }
        }
    } finally {
        admin.close();
    }
}
 
Example 3
Source File: NativeHBaseTypesIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
    try {
        try {
            admin.disableTable(HBASE_NATIVE_BYTES);
            admin.deleteTable(HBASE_NATIVE_BYTES);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            // Ignore: the table did not exist yet.
        }
        @SuppressWarnings("deprecation")
        HTableDescriptor descriptor = new HTableDescriptor(HBASE_NATIVE_BYTES);
        HColumnDescriptor columnDescriptor =  new HColumnDescriptor(FAMILY_NAME);
        columnDescriptor.setKeepDeletedCells(true);
        descriptor.addFamily(columnDescriptor);
        admin.createTable(descriptor, SPLITS);
        initTableValues();
    } finally {
        admin.close();
    }
}
 
Example 4
Source File: HBaseCleaner.java    From DistributedCrawler with Apache License 2.0
public static void clean(String tableName) throws IOException{
	HBaseAdmin admin = new HBaseAdmin(HBasePool.getInstance().getConf());
	admin.disableTable(tableName);
	admin.deleteTable(tableName);
	// Recreate the table with its column families.
	HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
	tableDescriptor.addFamily(new HColumnDescriptor("content"));
	tableDescriptor.addFamily(new HColumnDescriptor("title"));
	admin.createTable(tableDescriptor);
	admin.close();
}
 
Example 5
Source File: ReverseScanIT.java    From phoenix with Apache License 2.0
@BeforeClass
@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
public static void doSetup() throws Exception {
    Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
    // Ensures our split points will be used
    // TODO: do deletePriorTables before test?
    Connection conn = DriverManager.getConnection(getUrl());
    HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    try {
        admin.disableTable(TestUtil.ATABLE_NAME);
        admin.deleteTable(TestUtil.ATABLE_NAME);
    } catch (TableNotFoundException e) {
        // Ignore: ATABLE did not exist yet.
    } finally {
        admin.close();
        conn.close();
    }
}
 
Example 6
Source File: GenerateTestingHTables.java    From SpyGlass with Apache License 2.0
/**
 * Disables and deletes an HBase table, e.g. "int-test-01".
 */
private static void deleteTestTable(String tableName) throws IOException {

	// Reset configuration
	config.clear();
	config.set("hbase.zookeeper.quorum", QUORUM);
	config.set("hbase.zookeeper.property.clientPort", QUORUM_PORT);

	HBaseAdmin hbase = new HBaseAdmin(config);

	if (hbase.tableExists(tableName)) {
		LOG.info("Table: " + tableName + " exists.");
		hbase.disableTable(tableName);
		hbase.deleteTable(tableName);
		LOG.info("Table: " + tableName + " disabled and deleted.");
	} else {
		LOG.info("Table: " + tableName + " does not exist.");
	}

	hbase.close();
}
 
Example 7
Source File: HBaseFactoryTest.java    From bigdata-tutorial with Apache License 2.0
/**
 * @param tableName name of the table to delete
 * @return true if the table was deleted or did not exist, false if the delete failed
 */
public boolean deleteTable(String tableName) throws IOException {

	HBaseAdmin admin = new HBaseAdmin(conn);
	if (admin.tableExists(tableName)) {
		try {
			admin.disableTable(tableName);
			admin.deleteTable(tableName);
			LOGGER.info(">>>> Table {} delete success!", tableName);
		} catch (Exception ex) {
			LOGGER.error("delete table error:", ex);
			return false;
		}
	} else {
		LOGGER.warn(">>>> Table {} delete but not exist.", tableName);
	}
	admin.close();
	return true;
}
 
Example 8
Source File: HBaseDDLHandler.java    From bigdata-tutorial with Apache License 2.0
/**
 * @param tableName name of the table to delete
 * @return true if the table was deleted or did not exist, false if the delete failed
 */
public boolean deleteTable(String tableName) throws IOException {

	HBaseAdmin admin = new HBaseAdmin(getConnPool().getConn());
	if (admin.tableExists(tableName)) {
		try {
			if (admin.isTableEnabled(tableName)) {
				admin.disableTable(tableName);
			}
			admin.deleteTable(tableName);
			LOGGER.info(">>>> Table {} delete success!", tableName);
		} catch (Exception ex) {
			LOGGER.error("delete table error:", ex);
			return false;
		}
	} else {
		LOGGER.warn(">>>> Table {} delete but not exist.", tableName);
	}
	admin.close();
	return true;
}
 
Example 9
Source File: NativeHBaseTypesTest.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@BeforeClass
public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
    try {
        try {
            admin.disableTable(HBASE_NATIVE_BYTES);
            admin.deleteTable(HBASE_NATIVE_BYTES);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            // Ignore: the table did not exist yet.
        }
        HTableDescriptor descriptor = new HTableDescriptor(HBASE_NATIVE_BYTES);
        HColumnDescriptor columnDescriptor =  new HColumnDescriptor(FAMILY_NAME);
        columnDescriptor.setKeepDeletedCells(true);
        descriptor.addFamily(columnDescriptor);
        admin.createTable(descriptor, SPLITS);
        initTableValues();
    } finally {
        admin.close();
    }
}
 
Example 10
Source File: UpgradeUtil.java    From phoenix with Apache License 2.0
private static void deleteSequenceSnapshot(HBaseAdmin admin) throws SQLException {
    byte[] tableName = getSequenceSnapshotName();
    try {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    }
}
 
Example 11
Source File: HbaseLocalClusterIntegrationTest.java    From hadoop-mini-clusters with Apache License 2.0
private static void deleteHbaseTable(String tableName, Configuration configuration) throws Exception {

    final HBaseAdmin admin = new HBaseAdmin(configuration);
    if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
    admin.close();
}
 
Example 12
Source File: UserProfileDatasetExample.java    From kite with Apache License 2.0
/**
 * The constructor will start by registering the schemas with the meta store
 * table in HBase, and create the required tables to run.
 */
public UserProfileDatasetExample() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HBaseAdmin admin = new HBaseAdmin(conf);

  // Delete the table if it exists so we start fresh.
  if (admin.tableExists("kite_example_user_profiles")) {
    admin.disableTable("kite_example_user_profiles");
    admin.deleteTable("kite_example_user_profiles");
  }
  admin.close();

  HBaseDatasetRepository repo = new HBaseDatasetRepository.Builder()
      .configuration(conf).build();

  // TODO: change to use namespace (CDK-140)

  DatasetDescriptor userProfileDatasetDescriptor =
      new DatasetDescriptor.Builder().schema(UserProfileModel2.SCHEMA$).build();
  userProfileDataset = repo.create("default", "kite_example_user_profiles.UserProfileModel2",
      userProfileDatasetDescriptor);

  DatasetDescriptor userActionsDatasetDescriptor =
      new DatasetDescriptor.Builder().schema(UserActionsModel2.SCHEMA$).build();
  userActionsDataset = repo.create("default", "kite_example_user_profiles.UserActionsModel2",
      userActionsDatasetDescriptor);

  DatasetDescriptor userProfileActionsDatasetDescriptor =
      new DatasetDescriptor.Builder().schema(UserProfileActionsModel2.SCHEMA$).build();
  userProfileActionsDataset = repo.create("default", "kite_example_user_profiles.UserProfileActionsProtocol2",
      userProfileActionsDatasetDescriptor);

}
 
Example 13
Source File: DynamicColumnTest.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@BeforeClass
public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
    try {
        try {
            admin.disableTable(HBASE_DYNAMIC_COLUMNS_BYTES);
            admin.deleteTable(HBASE_DYNAMIC_COLUMNS_BYTES);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {}
        ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
        initTableValues();
    } finally {
        admin.close();
    }
}
 
Example 14
Source File: ProductMetricsIT.java    From phoenix with Apache License 2.0
private static void destroyTable() throws Exception {
    // Physically delete HBase table so that splits occur as expected for each test
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        try {
            admin.disableTable(PRODUCT_METRICS_NAME);
            admin.deleteTable(PRODUCT_METRICS_NAME);
        } catch (TableNotFoundException e) {
            // Ignore: the table did not exist.
        }
    } finally {
        admin.close();
    }
}
 
Example 15
Source File: ProductMetricsTest.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private static void destroyTable() throws Exception {
    // Physically delete HBase table so that splits occur as expected for each test
    Properties props = new Properties(TEST_PROPERTIES);
    ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        try {
            admin.disableTable(PRODUCT_METRICS_NAME);
            admin.deleteTable(PRODUCT_METRICS_NAME);
        } catch (TableNotFoundException e) {
            // Ignore: the table did not exist.
        }
    } finally {
        admin.close();
    }
}
 
Example 16
Source File: RemoveTables.java    From hadoop-arch-book with Apache License 2.0
public static void executeDeleteTables(Configuration config) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(config);

  if (admin.tableExists(HBaseTableMetaModel.profileCacheTableName)) {
    admin.disableTable(HBaseTableMetaModel.profileCacheTableName);
    admin.deleteTable(HBaseTableMetaModel.profileCacheTableName);
  }

  if (admin.tableExists(HBaseTableMetaModel.validationRulesTableName)) {
    admin.disableTable(HBaseTableMetaModel.validationRulesTableName);
    admin.deleteTable(HBaseTableMetaModel.validationRulesTableName);
  }

  admin.close();
}
 
Example 17
Source File: IndexHandlerIT.java    From phoenix with Apache License 2.0
@After
public void cleanup() throws Exception {
    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.disableTable(TestTable.getTableName());
    admin.deleteTable(TestTable.getTableName());
}
 
Example 18
Source File: IICreateHTableJob.java    From Kylin with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_II_NAME);
        options.addOption(OPTION_HTABLE_NAME);
        parseOptions(options, args);

        String tableName = getOptionValue(OPTION_HTABLE_NAME);
        String iiName = getOptionValue(OPTION_II_NAME);

        KylinConfig config = KylinConfig.getInstanceFromEnv();
        IIManager iiManager = IIManager.getInstance(config);
        IIInstance ii = iiManager.getII(iiName);
        int sharding = ii.getDescriptor().getSharding();

        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor cf = new HColumnDescriptor(IIDesc.HBASE_FAMILY);
        cf.setMaxVersions(1);
        //cf.setCompressionType(Algorithm.LZO);
        cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        tableDesc.addFamily(cf);
        tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());

        Configuration conf = HBaseConfiguration.create(getConf());
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        // drop the table first
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tableName)) {
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
        }

        // create table
        byte[][] splitKeys = getSplits(sharding);
        if (splitKeys.length == 0)
            splitKeys = null;
        admin.createTable(tableDesc, splitKeys);
        if (splitKeys != null) {
            for (int i = 0; i < splitKeys.length; i++) {
                System.out.println("split key " + i + ": " + BytesUtil.toHex(splitKeys[i]));
            }
        }
        System.out.println("create hbase table " + tableName + " done.");
        admin.close();

        return 0;
    } catch (Exception e) {
        printUsage(options);
        throw e;
    }
}
 
Example 19
Source File: QueryDatabaseMetaDataTest.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Test
public void testCreateOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(PHOENIX_JDBC_URL, TEST_PROPERTIES).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] {cfB, cfC};
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    try {
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
        admin.enableTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
        // Ignore: the table did not exist.
    }
    
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);
        
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    PhoenixConnection conn1 = DriverManager.getConnection(PHOENIX_JDBC_URL, props).unwrap(PhoenixConnection.class);
    ensureTableCreated(getUrl(), tableName, null, ts);
    
    descriptor = admin.getTableDescriptor(htableName);
    assertEquals(3,descriptor.getColumnFamilies().length);
    HColumnDescriptor cdA = descriptor.getFamily(cfA);
    assertTrue(cdA.getKeepDeletedCells());
    assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using WITH
    assertEquals(1, cdA.getMaxVersions()); // Overridden using WITH
    HColumnDescriptor cdB = descriptor.getFamily(cfB);
    assertTrue(cdB.getKeepDeletedCells());
    assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // Should keep the original value.
    // CF c should stay the same since it's not a Phoenix cf.
    HColumnDescriptor cdC = descriptor.getFamily(cfC);
    assertNotNull("Column family not found", cdC);
    assertFalse(cdC.getKeepDeletedCells());
    assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
    assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
    admin.close();
     
    int rowCount = 5;
    String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
    PreparedStatement ps = conn1.prepareStatement(upsert);
    for (int i = 0; i < rowCount; i++) {
        ps.setString(1, Integer.toString(i));
        ps.setInt(2, i+1);
        ps.setInt(3, i+2);
        ps.execute();
    }
    conn1.commit();
    conn1.close();
    
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    Connection conn2 = DriverManager.getConnection(PHOENIX_JDBC_URL, props);
    String query = "SELECT count(1) FROM " + tableName;
    ResultSet rs = conn2.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(rowCount, rs.getLong(1));
    
    query = "SELECT id, col1,col2 FROM " + tableName;
    rs = conn2.createStatement().executeQuery(query);
    for (int i = 0; i < rowCount; i++) {
        assertTrue(rs.next());
        assertEquals(Integer.toString(i),rs.getString(1));
        assertEquals(i+1, rs.getInt(2));
        assertEquals(i+2, rs.getInt(3));
    }
    assertFalse(rs.next());
    conn2.close();
}
 
Example 20
Source File: TestWALReplayWithIndexWritesAndCompressedWAL.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableNameStr), null, null, false);
  final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
  
  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getLog().closeAndDelete();
  HLog wal = createWAL(this.conf);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL()).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + "-server-1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  long seqid = region.initialize();
  // HRegionServer usually does this. It knows the largest seqid across all regions.
  wal.setSequenceNumber(seqid);
  
  //make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(new Put[] { p });

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
    Mockito.any(Exception.class));
  region.close(true);
  wal.close();

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf);
  HLog wal2 = createWAL(this.conf);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}