Java Code Examples for org.apache.hadoop.hbase.client.Admin#close()
The following examples show how to use org.apache.hadoop.hbase.client.Admin#close().
Each example is taken from a real open-source project; the source file, project, and license are noted above the code.
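All of the examples below end by calling Admin#close() explicitly, usually from a finally block. Since Admin implements java.io.Closeable, a try-with-resources block gives the same guarantee with less ceremony. A minimal sketch of that pattern; the table name "my_table" is a hypothetical placeholder:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminCloseSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Resources declared in a try-with-resources are closed automatically, in reverse
    // declaration order, even if the body throws: admin.close(), then connection.close().
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // "my_table" is a hypothetical table name used only for illustration.
      System.out.println("exists: " + admin.tableExists(TableName.valueOf("my_table")));
    }
  }
}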
Example 1
Source File: TestRemoteRestore.java From hbase with Apache License 2.0
/**
 * Verify that a remote restore on a single table is successful.
 *
 * @throws Exception if doing the backup or an operation on the tables fails
 */
@Test
public void testFullRestoreRemote() throws Exception {
  LOG.info("test remote full backup on a single table");
  String backupId =
      backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
  LOG.info("backup complete");
  TableName[] tableset = new TableName[] { table1 };
  TableName[] tablemap = new TableName[] { table1_restore };
  getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId,
      false, tableset, tablemap, false));
  Admin hba = TEST_UTIL.getAdmin();
  assertTrue(hba.tableExists(table1_restore));
  TEST_UTIL.deleteTable(table1_restore);
  hba.close();
}
Example 2
Source File: HBaseUsage.java From kylin with Apache License 2.0
private static void show() throws IOException {
  Map<String, List<String>> envs = Maps.newHashMap();
  // get all kylin hbase tables
  KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
  Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
  Admin hbaseAdmin = conn.getAdmin();
  String tableNamePrefix = kylinConfig.getHBaseTableNamePrefix();
  HTableDescriptor[] tableDescriptors = hbaseAdmin.listTables(tableNamePrefix + ".*");
  for (HTableDescriptor desc : tableDescriptors) {
    String host = desc.getValue(IRealizationConstants.HTableTag);
    if (StringUtils.isEmpty(host)) {
      add("unknown", desc.getNameAsString(), envs);
    } else {
      add(host, desc.getNameAsString(), envs);
    }
  }
  for (Map.Entry<String, List<String>> entry : envs.entrySet()) {
    System.out.println(entry.getKey() + " has htable count: " + entry.getValue().size());
  }
  hbaseAdmin.close();
}
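Example 2 scans tables with the HBase 1.x calls listTables(String) and HTableDescriptor, both deprecated in the HBase 2.x client. A minimal sketch of the same prefix scan against the 2.x API, assuming an already-open Admin; listKylinTables is a hypothetical helper name:

import java.io.IOException;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class ListTablesSketch {
  // Returns descriptors of all tables whose names start with the given prefix.
  static List<TableDescriptor> listKylinTables(Admin admin, String tableNamePrefix)
      throws IOException {
    // Pattern.quote() makes dots in the prefix match literally instead of "any character".
    return admin.listTableDescriptors(Pattern.compile(Pattern.quote(tableNamePrefix) + ".*"));
  }
}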
Example 3
Source File: BaseTest.java From phoenix with Apache License 2.0
/**
 * Disable and drop all the tables except SYSTEM.CATALOG and SYSTEM.SEQUENCE
 */
private static void disableAndDropNonSystemTables() throws Exception {
  if (driver == null) return;
  Admin admin = driver.getConnectionQueryServices(null, null).getAdmin();
  try {
    TableDescriptor[] tables = admin.listTables();
    for (TableDescriptor table : tables) {
      String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getTableName().getName());
      if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) {
        disableAndDropTable(admin, table.getTableName());
      }
    }
  } finally {
    admin.close();
  }
}
Example 4
Source File: LocalHBaseCluster.java From hbase with Apache License 2.0
/**
 * Test things basically work.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
  cluster.startup();
  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();
  try {
    HTableDescriptor htd =
        new HTableDescriptor(TableName.valueOf(cluster.getClass().getName()));
    admin.createTable(htd);
  } finally {
    admin.close();
  }
  connection.close();
  cluster.shutdown();
}
Example 5
Source File: TestFullRestore.java From hbase with Apache License 2.0
/**
 * Verify that a single table is restored using overwrite.
 *
 * @throws Exception if doing the backup or an operation on the tables fails
 */
@Test
public void testFullRestoreSingleOverwriteCommand() throws Exception {
  LOG.info("test full restore on a single table empty table: command-line");
  List<TableName> tables = Lists.newArrayList(table1);
  String backupId = fullTableBackup(tables);
  assertTrue(checkSucceeded(backupId));
  LOG.info("backup complete");
  TableName[] tableset = new TableName[] { table1 };
  // restore <backup_root_path> <backup_id> <tables> [tableMapping]
  String[] args =
      new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(tableset, ","), "-o" };
  // Run restore
  int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
  assertTrue(ret == 0);
  Admin hba = TEST_UTIL.getAdmin();
  assertTrue(hba.tableExists(table1));
  hba.close();
}
Example 6
Source File: HBaseUsage.java From kylin-on-parquet-v2 with Apache License 2.0
private static void show() throws IOException {
  Map<String, List<String>> envs = Maps.newHashMap();
  // get all kylin hbase tables
  KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
  Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
  Admin hbaseAdmin = conn.getAdmin();
  String tableNamePrefix = kylinConfig.getHBaseTableNamePrefix();
  HTableDescriptor[] tableDescriptors = hbaseAdmin.listTables(tableNamePrefix + ".*");
  for (HTableDescriptor desc : tableDescriptors) {
    String host = desc.getValue(IRealizationConstants.HTableTag);
    if (StringUtils.isEmpty(host)) {
      add("unknown", desc.getNameAsString(), envs);
    } else {
      add(host, desc.getNameAsString(), envs);
    }
  }
  for (Map.Entry<String, List<String>> entry : envs.entrySet()) {
    System.out.println(entry.getKey() + " has htable count: " + entry.getValue().size());
  }
  hbaseAdmin.close();
}
Example 7
Source File: PerformanceEvaluation.java From hbase with Apache License 2.0
private void runTest(final Class<? extends TestBase> cmd, TestOptions opts)
    throws IOException, InterruptedException, ClassNotFoundException, ExecutionException {
  // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do
  // the TestOptions introspection for us and dump the output in a readable format.
  LOG.info(cmd.getSimpleName() + " test run options=" + GSON.toJson(opts));
  Admin admin = null;
  Connection connection = null;
  try {
    connection = ConnectionFactory.createConnection(getConf());
    admin = connection.getAdmin();
    checkTable(admin, opts);
  } finally {
    if (admin != null) admin.close();
    if (connection != null) connection.close();
  }
  if (opts.nomapred) {
    doLocalClients(opts, getConf());
  } else {
    doMapReduce(opts, getConf());
  }
}
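The null-guarded finally block in Example 7 predates try-with-resources; the same close-in-reverse-order guarantee can be packaged in a small helper. A sketch under that assumption; WithAdminSketch, AdminAction, and withAdmin are hypothetical names:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class WithAdminSketch {
  // A callback to run against a short-lived Admin.
  interface AdminAction {
    void run(Admin admin) throws IOException;
  }

  // Opens a Connection and Admin, runs the action, and closes both in reverse order
  // (admin first, then connection), mirroring the finally block in Example 7.
  static void withAdmin(Configuration conf, AdminAction action) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      action.run(admin);
    }
  }
}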
Example 8
Source File: TestZooKeeper.java From hbase with Apache License 2.0
/**
 * Make sure we can use the cluster
 */
private void testSanity(final String testName) throws Exception {
  String tableName = testName + "_" + System.currentTimeMillis();
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam")).build();
  LOG.info("Creating table " + tableName);
  Admin admin = TEST_UTIL.getAdmin();
  try {
    admin.createTable(desc);
  } finally {
    admin.close();
  }
  Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
  Put put = new Put(Bytes.toBytes("testrow"));
  put.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("testdata"));
  LOG.info("Putting table " + tableName);
  table.put(put);
  table.close();
}
Example 9
Source File: IntegrationTestDDLMasterFailover.java From hbase with Apache License 2.0
@Override
void perform() throws IOException {
  Admin admin = connection.getAdmin();
  try {
    TableDescriptor td = createTableDesc();
    TableName tableName = td.getTableName();
    if (admin.tableExists(tableName)) {
      return;
    }
    String numRegionKey = String.format(NUM_REGIONS_KEY, this.getClass().getSimpleName());
    numRegions = getConf().getInt(numRegionKey, DEFAULT_NUM_REGIONS);
    byte[] startKey = Bytes.toBytes("row-0000000000");
    byte[] endKey = Bytes.toBytes("row-" + Integer.MAX_VALUE);
    LOG.info("Creating table:" + td);
    admin.createTable(td, startKey, endKey, numRegions);
    Assert.assertTrue("Table: " + td + " was not created", admin.tableExists(tableName));
    TableDescriptor freshTableDesc = admin.getDescriptor(tableName);
    Assert.assertTrue("After create, Table: " + tableName + " is not enabled",
        admin.isTableEnabled(tableName));
    enabledTables.put(tableName, freshTableDesc);
    LOG.info("Created table:" + freshTableDesc);
  } catch (Exception e) {
    LOG.warn("Caught exception in action: " + this.getClass());
    throw e;
  } finally {
    admin.close();
  }
}
Example 10
Source File: HBaseClientManager.java From presto-hbase-connector with Apache License 2.0
public void close(Admin admin) {
  try {
    admin.close();
  } catch (Exception ex) {
    log.error(ex, ex.getMessage());
  }
}
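Example 10 swallows close() failures so that cleanup in a finally block cannot mask the original error. A generic sketch of the same idea for any AutoCloseable, assuming an SLF4J logger; CloseQuietly is a hypothetical class name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class CloseQuietly {
  private static final Logger LOG = LoggerFactory.getLogger(CloseQuietly.class);

  // Closes the resource and logs (rather than propagates) any failure, so a close()
  // error in a finally block cannot hide the exception that caused the cleanup.
  public static void closeQuietly(AutoCloseable resource) {
    if (resource == null) {
      return;
    }
    try {
      resource.close();
    } catch (Exception ex) {
      LOG.error("Failed to close resource", ex);
    }
  }
}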
Example 11
Source File: GridTableHBaseBenchmark.java From kylin with Apache License 2.0
private static void createHTableIfNeeded(Connection conn, String tableName) throws IOException {
  Admin hbase = conn.getAdmin();
  try {
    boolean tableExist = false;
    try {
      hbase.getTableDescriptor(TableName.valueOf(tableName));
      tableExist = true;
    } catch (TableNotFoundException e) {
      // expected when the table does not exist yet
    }
    if (tableExist) {
      logger.info("HTable '{}' already exists", tableName);
      return;
    }
    logger.info("Creating HTable '{}'", tableName);
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor fd = new HColumnDescriptor(CF);
    fd.setBlocksize(CELL_SIZE);
    desc.addFamily(fd);
    hbase.createTable(desc);
    logger.info("HTable '{}' created", tableName);
  } finally {
    hbase.close();
  }
}
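Example 11 detects a missing table by catching TableNotFoundException from getTableDescriptor(). Admin also exposes tableExists(TableName), which avoids exception-driven control flow. A sketch; htableExists is a hypothetical helper name:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class TableExistsSketch {
  // Same existence check as Example 11, without catching TableNotFoundException.
  static boolean htableExists(Admin admin, String tableName) throws IOException {
    return admin.tableExists(TableName.valueOf(tableName));
  }
}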
Example 12
Source File: TestCompactionFileNotFound.java From hbase with Apache License 2.0
@Test
public void testCompactionAfterRefresh() throws Exception {
  Admin admin = util.getAdmin();
  table = util.createTable(TEST_TABLE, TEST_FAMILY);
  try {
    // Create Multiple store files
    Put puta = new Put(ROW_A);
    puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
    table.put(puta);
    admin.flush(TEST_TABLE);
    Put putb = new Put(ROW_B);
    putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
    table.put(putb);
    admin.flush(TEST_TABLE);
    Put putc = new Put(ROW_C);
    putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
    table.put(putc);
    admin.flush(TEST_TABLE);
    admin.compact(TEST_TABLE);
    while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
      Thread.sleep(1000);
    }
    table.put(putb);
    HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
        .getRegionByEncodedName(admin.getRegions(TEST_TABLE).get(0).getEncodedName());
    // Refresh store files post compaction, this should not open already compacted files
    hr1.refreshStoreFiles(true);
    // Archive the store files and try another compaction to see if all is good
    for (HStore store : hr1.getStores()) {
      store.closeAndArchiveCompactedFiles();
    }
    try {
      hr1.compact(false);
    } catch (IOException e) {
      LOG.error("Got an exception during compaction", e);
      if (e instanceof FileNotFoundException) {
        Assert.fail("Got a FNFE during compaction");
      } else {
        Assert.fail();
      }
    }
  } finally {
    if (admin != null) {
      admin.close();
    }
  }
}
Example 13
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java From phoenix with Apache License 2.0
/**
 * Test writing edits into a region, closing it, splitting logs, and opening the region again.
 * Verify seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final RegionInfo hri = RegionInfoBuilder
      .newBuilder(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr))
      .setSplit(false).build();
  final Path basedir =
      FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
  deleteDir(basedir);
  final TableDescriptor htd = createBasic3FamilyHTD(tableNameStr);

  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);
  WALFactory walFactory = new WALFactory(this.conf, "localhost,1234");
  WAL wal = createWAL(this.conf, walFactory);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal); // FIXME: Uses private type
  region0.close();
  region0.getWAL().close();

  HRegionServer mockRS = Mockito.mock(HRegionServer.class);
  // mock out some of the internals of the RSS, so we can run CPs
  when(mockRS.getWAL(null)).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
  when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
  region.initialize();

  // make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(p);

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1))
      .abort(Mockito.anyString(), Mockito.any(Exception.class));

  // then create the index table so we are successful on WAL replay
  TestIndexManagementUtil.createIndexTable(UTIL.getAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf, walFactory);
  WAL wal2 = createWAL(this.conf, walFactory);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();
  org.apache.hadoop.hbase.client.Connection hbaseConn =
      ConnectionFactory.createConnection(UTIL.getConfiguration());

  // now check to ensure that we wrote to the index table
  Table index = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(INDEX_TABLE_NAME));
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't properly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  Admin admin = UTIL.getAdmin();
  admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.close();
}
Example 14
Source File: IndexScrutinyTool.java From phoenix with Apache License 2.0
public Job createSubmittableJob(String schemaName, String indexTable, String dataTable,
    SourceTable sourceTable, Class<IndexScrutinyMapperForTest> mapperClass) throws Exception {
  Preconditions.checkArgument(SourceTable.DATA_TABLE_SOURCE.equals(sourceTable)
      || SourceTable.INDEX_TABLE_SOURCE.equals(sourceTable));
  final String qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable);
  final String qIndexTable;
  if (schemaName != null && !schemaName.isEmpty()) {
    qIndexTable = SchemaUtil.getQualifiedTableName(schemaName, indexTable);
  } else {
    qIndexTable = indexTable;
  }
  PhoenixConfigurationUtil.setScrutinyDataTable(configuration, qDataTable);
  PhoenixConfigurationUtil.setScrutinyIndexTable(configuration, qIndexTable);
  PhoenixConfigurationUtil.setScrutinySourceTable(configuration, sourceTable);
  PhoenixConfigurationUtil.setScrutinyOutputInvalidRows(configuration, outputInvalidRows);
  PhoenixConfigurationUtil.setScrutinyOutputMax(configuration, outputMaxRows);

  final PTable pdataTable = PhoenixRuntime.getTable(connection, qDataTable);
  final PTable pindexTable = PhoenixRuntime.getTable(connection, qIndexTable);

  // set CURRENT_SCN for our scan so that incoming writes don't throw off scrutiny
  configuration.set(PhoenixConfigurationUtil.CURRENT_SCN_VALUE, Long.toString(ts));

  // set the source table to either data or index table
  SourceTargetColumnNames columnNames =
      SourceTable.DATA_TABLE_SOURCE.equals(sourceTable)
          ? new SourceTargetColumnNames.DataSourceColNames(pdataTable, pindexTable)
          : new SourceTargetColumnNames.IndexSourceColNames(pdataTable, pindexTable);
  String qSourceTable = columnNames.getQualifiedSourceTableName();
  List<String> sourceColumnNames = columnNames.getSourceColNames();
  List<String> sourceDynamicCols = columnNames.getSourceDynamicCols();
  List<String> targetDynamicCols = columnNames.getTargetDynamicCols();

  // Setup the select query against source - we either select the index columns from the
  // index table, or select the data table equivalents of the index columns from the data table
  final String selectQuery = QueryUtil.constructSelectStatement(qSourceTable, sourceColumnNames,
      null, Hint.NO_INDEX, true);
  LOGGER.info("Query used on source table to feed the mapper: " + selectQuery);

  PhoenixConfigurationUtil.setScrutinyOutputFormat(configuration, outputFormat);

  // if outputting to table, setup the upsert to the output table
  if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) {
    String upsertStmt = IndexScrutinyTableOutput.constructOutputTableUpsert(sourceDynamicCols,
        targetDynamicCols, connection);
    PhoenixConfigurationUtil.setUpsertStatement(configuration, upsertStmt);
    LOGGER.info("Upsert statement used for output table: " + upsertStmt);
  }

  final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, qSourceTable,
      columnNames.getQualifiedTargetTableName());
  final Job job = Job.getInstance(configuration, jobName);

  if (!useSnapshot) {
    PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable, selectQuery);
  } else {
    // TODO check if using a snapshot works
    Admin admin = null;
    String snapshotName;
    try {
      final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class);
      admin = pConnection.getQueryServices().getAdmin();
      String pdataTableName = pdataTable.getName().getString();
      snapshotName = new StringBuilder(pdataTableName).append("-Snapshot").toString();
      admin.snapshot(snapshotName, TableName.valueOf(pdataTableName));
    } finally {
      if (admin != null) {
        admin.close();
      }
    }
    // root dir not a subdirectory of hbase dir
    Path rootDir = new Path("hdfs:///index-snapshot-dir");
    FSUtils.setRootDir(configuration, rootDir);
    // set input for map reduce job using hbase snapshots
    //PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, snapshotName,
    //  qDataTable, restoreDir, selectQuery);
  }
  TableMapReduceUtil.initCredentials(job);
  Path outputPath = getOutputPath(configuration, basePath,
      SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) ? pdataTable : pindexTable);
  return configureSubmittableJob(job, outputPath, mapperClass);
}
Example 15
Source File: TestMutateRowsRecovery.java From hbase with Apache License 2.0
@Test
public void MutateRowsAndCheckPostKill() throws IOException, InterruptedException {
  final TableName tableName = TableName.valueOf("test");
  Admin admin = null;
  Table hTable = null;
  try {
    admin = connection.getAdmin();
    hTable = connection.getTable(tableName);
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
        new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setColumnFamily(
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(fam1));
    admin.createTable(tableDescriptor);

    // Add a multi
    RowMutations rm = new RowMutations(row1);
    Put p1 = new Put(row1);
    p1.addColumn(fam1, qual1, value1);
    p1.setDurability(Durability.SYNC_WAL);
    rm.add(p1);
    hTable.mutateRow(rm);

    // Add a put
    Put p2 = new Put(row1);
    p2.addColumn(fam1, qual2, value2);
    p2.setDurability(Durability.SYNC_WAL);
    hTable.put(p2);

    HRegionServer rs1 = TESTING_UTIL.getRSForFirstRegionInTable(tableName);
    long now = EnvironmentEdgeManager.currentTime();
    // Send the RS Load to ensure correct lastflushedseqid for stores
    rs1.tryRegionServerReport(now - 30000, now);

    // Kill the RS to trigger wal replay
    cluster.killRegionServer(rs1.serverName);

    // Ensure correct data exists
    Get g1 = new Get(row1);
    Result result = hTable.get(g1);
    assertTrue(result.getValue(fam1, qual1) != null);
    assertEquals(0, Bytes.compareTo(result.getValue(fam1, qual1), value1));
    assertTrue(result.getValue(fam1, qual2) != null);
    assertEquals(0, Bytes.compareTo(result.getValue(fam1, qual2), value2));
  } finally {
    if (admin != null) {
      admin.close();
    }
    if (hTable != null) {
      hTable.close();
    }
  }
}
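Example 15 builds its descriptors through TableDescriptorBuilder.ModifyableTableDescriptor, an HBase-internal type that test code can reach but applications generally should not depend on. A sketch of the same one-family table definition through the public builder API; DescriptorBuilderSketch and oneFamilyTable are hypothetical names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class DescriptorBuilderSketch {
  // Builds an immutable descriptor for a table with a single column family,
  // suitable for Admin#createTable(TableDescriptor).
  static TableDescriptor oneFamilyTable(TableName tableName, byte[] family) {
    return TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
        .build();
  }
}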
Example 16
Source File: IntegrationTestDDLMasterFailover.java From hbase with Apache License 2.0
@Override
void perform() throws IOException {
  TableDescriptor selected = selectTable(enabledTables);
  if (selected == null) {
    return;
  }
  Admin admin = connection.getAdmin();
  try {
    TableName tableName = selected.getTableName();
    LOG.info("Disabling table :" + selected);
    admin.disableTable(tableName);
    Assert.assertTrue("Table: " + selected + " was not disabled",
        admin.isTableDisabled(tableName));
    TableDescriptor freshTableDesc = admin.getDescriptor(tableName);
    Assert.assertTrue("After disable, Table: " + tableName + " is not disabled",
        admin.isTableDisabled(tableName));
    disabledTables.put(tableName, freshTableDesc);
    LOG.info("Disabled table :" + freshTableDesc);
  } catch (Exception e) {
    LOG.warn("Caught exception in action: " + this.getClass());
    // TODO workaround
    // loosen the restriction for TableNotDisabledException/TableNotEnabledException thrown in
    // sync operations:
    // 1) when enable/disable starts, the table state is changed to ENABLING/DISABLING (ZK node
    //    in 1.x), which will be further changed to ENABLED/DISABLED once the operation completes
    // 2) if master failover happens in the middle of the enable/disable operation, the new
    //    master will try to recover the tables in ENABLING/DISABLING state, as programmed in
    //    AssignmentManager#recoverTableInEnablingState() and
    //    AssignmentManager#recoverTableInDisablingState()
    // 3) after the new master initialization completes, the procedure tries to re-do the
    //    enable/disable operation, which was already done. Ignore those exceptions before the
    //    change of behaviors of AssignmentManager in presence of PV2
    if (e instanceof TableNotEnabledException) {
      LOG.warn("Caught TableNotEnabledException in action: " + this.getClass());
      e.printStackTrace();
    } else {
      throw e;
    }
  } finally {
    admin.close();
  }
}
Example 17
Source File: TestCompactionFileNotFound.java From hbase with Apache License 2.0
@Test
public void testSplitAfterRefresh() throws Exception {
  Admin admin = util.getAdmin();
  table = util.createTable(TEST_TABLE, TEST_FAMILY);
  try {
    // Create Multiple store files
    Put puta = new Put(ROW_A);
    puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
    table.put(puta);
    admin.flush(TEST_TABLE);
    Put putb = new Put(ROW_B);
    putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
    table.put(putb);
    admin.flush(TEST_TABLE);
    Put putc = new Put(ROW_C);
    putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
    table.put(putc);
    admin.flush(TEST_TABLE);
    admin.compact(TEST_TABLE);
    while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
      Thread.sleep(1000);
    }
    table.put(putb);
    HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
        .getRegionByEncodedName(admin.getRegions(TEST_TABLE).get(0).getEncodedName());
    // Refresh store files post compaction, this should not open already compacted files
    hr1.refreshStoreFiles(true);
    int numRegionsBeforeSplit = admin.getRegions(TEST_TABLE).size();
    // Check if we can successfully split after compaction
    admin.splitRegionAsync(admin.getRegions(TEST_TABLE).get(0).getEncodedNameAsBytes(), ROW_C)
        .get();
    util.waitFor(20000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        int numRegionsAfterSplit = 0;
        List<RegionServerThread> rst = util.getMiniHBaseCluster().getLiveRegionServerThreads();
        for (RegionServerThread t : rst) {
          numRegionsAfterSplit += t.getRegionServer().getRegions(TEST_TABLE).size();
        }
        // Make sure that the split went through and all the regions are assigned
        return (numRegionsAfterSplit == numRegionsBeforeSplit + 1
            && admin.isTableAvailable(TEST_TABLE));
      }
    });
    // Split at this point should not result in the RS being aborted
    assertEquals(3, util.getMiniHBaseCluster().getLiveRegionServerThreads().size());
  } finally {
    if (admin != null) {
      admin.close();
    }
  }
}
Example 18
Source File: TestPerColumnFamilyFlush.java From hbase with Apache License 2.0
public static void main(String[] args) throws Exception {
  int numRegions = Integer.parseInt(args[0]);
  long numRows = Long.parseLong(args[1]);

  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(TABLENAME);
  tableDescriptor.setMaxFileSize(10L * 1024 * 1024 * 1024);
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
      ConstantSizeRegionSplitPolicy.class.getName());
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY1));
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY2));
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY3));

  Configuration conf = HBaseConfiguration.create();
  Connection conn = ConnectionFactory.createConnection(conf);
  Admin admin = conn.getAdmin();
  if (admin.tableExists(TABLENAME)) {
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
  }
  if (numRegions >= 3) {
    byte[] startKey = new byte[16];
    byte[] endKey = new byte[16];
    Arrays.fill(endKey, (byte) 0xFF);
    admin.createTable(tableDescriptor, startKey, endKey, numRegions);
  } else {
    admin.createTable(tableDescriptor);
  }
  admin.close();

  Table table = conn.getTable(TABLENAME);
  byte[] qf = Bytes.toBytes("qf");
  Random rand = new Random();
  byte[] value1 = new byte[16];
  byte[] value2 = new byte[256];
  byte[] value3 = new byte[4096];
  for (long i = 0; i < numRows; i++) {
    Put put = new Put(Hashing.md5().hashLong(i).asBytes());
    rand.setSeed(i);
    rand.nextBytes(value1);
    rand.nextBytes(value2);
    rand.nextBytes(value3);
    put.addColumn(FAMILY1, qf, value1);
    put.addColumn(FAMILY2, qf, value2);
    put.addColumn(FAMILY3, qf, value3);
    table.put(put);
    if (i % 10000 == 0) {
      LOG.info(i + " rows put");
    }
  }
  table.close();
  conn.close();
}
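Example 18 closes the Admin as soon as the DDL work is done while keeping the Connection open for the data load, which matches the HBase client threading model: a Connection is heavyweight and meant to be created once and shared, while Admin (like Table) is lightweight and short-lived. A sketch of that split, assuming a shared Connection; recreateTable is a hypothetical helper name:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class SharedConnectionSketch {
  // Acquires a short-lived Admin from a long-lived Connection, drops the table if it
  // exists, and releases the Admin immediately; the Connection stays open for later work.
  static void recreateTable(Connection conn, TableName name) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      if (admin.tableExists(name)) {
        admin.disableTable(name);
        admin.deleteTable(name);
      }
    }
  }
}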
Example 19
Source File: TestFullBackupSet.java From hbase with Apache License 2.0
/**
 * Verify that full backup is created on a single table with data correctly.
 *
 * @throws Exception if doing the backup or an operation on the tables fails
 */
@Test
public void testFullBackupSetExist() throws Exception {
  LOG.info("Test full backup, backup set exists");
  // Create set
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    String name = "name";
    table.addToBackupSet(name, new String[] { table1.getNameAsString() });
    List<TableName> names = table.describeBackupSet(name);
    assertNotNull(names);
    assertTrue(names.size() == 1);
    assertTrue(names.get(0).equals(table1));
    String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
    // Run backup
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertTrue(ret == 0);
    List<BackupInfo> backups = table.getBackupHistory();
    assertTrue(backups.size() == 1);
    String backupId = backups.get(0).getBackupId();
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete");

    // Restore from set into other table
    args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m",
        table1_restore.getNameAsString(), "-o" };
    // Run restore
    ret = ToolRunner.run(conf1, new RestoreDriver(), args);
    assertTrue(ret == 0);
    Admin hba = TEST_UTIL.getAdmin();
    assertTrue(hba.tableExists(table1_restore));
    // Verify number of rows in both tables
    assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
    TEST_UTIL.deleteTable(table1_restore);
    LOG.info("restore into other table is complete");
    hba.close();
  }
}
Example 20
Source File: IntegrationTestDDLMasterFailover.java From hbase with Apache License 2.0
@Override
void perform() throws IOException {
  TableDescriptor selected = selectTable(disabledTables);
  if (selected == null) {
    return;
  }
  ColumnFamilyDescriptor columnDesc = selectFamily(selected);
  if (columnDesc == null) {
    return;
  }
  Admin admin = connection.getAdmin();
  try {
    TableName tableName = selected.getTableName();
    // possible DataBlockEncoding ids
    DataBlockEncoding[] possibleIds = { DataBlockEncoding.NONE, DataBlockEncoding.PREFIX,
        DataBlockEncoding.DIFF, DataBlockEncoding.FAST_DIFF, DataBlockEncoding.ROW_INDEX_V1 };
    short id = possibleIds[RandomUtils.nextInt(0, possibleIds.length)].getId();
    LOG.info("Altering encoding of column family: " + columnDesc + " to: " + id
        + " in table: " + tableName);
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(columnDesc)
        .setDataBlockEncoding(DataBlockEncoding.getEncodingById(id))
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(selected)
        .modifyColumnFamily(cfd)
        .build();
    admin.modifyTable(td);
    // assertion
    TableDescriptor freshTableDesc = admin.getDescriptor(tableName);
    ColumnFamilyDescriptor freshColumnDesc = freshTableDesc.getColumnFamily(columnDesc.getName());
    Assert.assertEquals("Encoding of column family: " + columnDesc + " was not altered",
        freshColumnDesc.getDataBlockEncoding().getId(), id);
    Assert.assertTrue("After alter encoding of column family, Table: " + tableName
        + " is not disabled", admin.isTableDisabled(tableName));
    disabledTables.put(tableName, freshTableDesc);
    LOG.info("Altered encoding of column family: " + freshColumnDesc + " to: " + id
        + " in table: " + tableName);
  } catch (Exception e) {
    LOG.warn("Caught exception in action: " + this.getClass());
    throw e;
  } finally {
    admin.close();
  }
}