org.apache.hadoop.hbase.TableName Java Examples
The following examples show how to use
org.apache.hadoop.hbase.TableName.
Each example is taken from an open-source project; the source file, the project it comes from, and that project's license are noted above the code.
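Before the project examples, here is a minimal usage sketch of the TableName API itself. The namespace "demo_ns" and table "users" are made-up names for illustration only and are not taken from any project below:

import org.apache.hadoop.hbase.TableName;

public class TableNameBasics {
  public static void main(String[] args) {
    // Hypothetical names used only for illustration.
    TableName simple = TableName.valueOf("users");                 // table in the default namespace
    TableName namespaced = TableName.valueOf("demo_ns", "users");  // explicit namespace + qualifier

    System.out.println(simple.getNameAsString());            // users
    System.out.println(namespaced.getNameAsString());         // demo_ns:users
    System.out.println(namespaced.getNamespaceAsString());    // demo_ns
    System.out.println(namespaced.getQualifierAsString());    // users
    System.out.println(TableName.META_TABLE_NAME);            // hbase:meta
    System.out.println(TableName.META_TABLE_NAME.isSystemTable()); // true
  }
}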
Example #1
Source File: HBaseRowInputFormat.java From Flink-CEPplus with Apache License 2.0
private void connectToTable() {
  if (this.conf == null) {
    this.conf = HBaseConfiguration.create();
  }
  try {
    Connection conn = ConnectionFactory.createConnection(conf);
    super.table = (HTable) conn.getTable(TableName.valueOf(tableName));
  } catch (TableNotFoundException tnfe) {
    LOG.error("The table " + tableName + " not found ", tnfe);
    throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
  } catch (IOException ioe) {
    LOG.error("Exception while creating connection to HBase.", ioe);
    throw new RuntimeException("Cannot create connection to HBase.", ioe);
  }
}
Example #2
Source File: HBaseSimpleDemo.java From bigdata-tutorial with Apache License 2.0
public Boolean createTable(String tableName, String familyName) throws Exception {
  HBaseAdmin admin = new HBaseAdmin(hconn);
  if (admin.tableExists(tableName)) {
    LOGGER.warn(">>>> Table {} exists!", tableName);
    admin.close();
    return false;
  }
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
  tableDesc.addFamily(new HColumnDescriptor(familyName));
  admin.createTable(tableDesc);
  LOGGER.info(">>>> Table {} create success!", tableName);
  admin.close();
  return true;
}
Example #3
Source File: TestRegionSplitter.java From hbase with Apache License 2.0
/**
 * Test creating a pre-split table using the UniformSplit algorithm.
 */
@Test
public void testCreatePresplitTableUniform() throws Exception {
  List<byte[]> expectedBounds = new ArrayList<>(17);
  expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);
  expectedBounds.add(new byte[] { 0x10, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { 0x20, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { 0x30, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { 0x40, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { 0x50, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { 0x60, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { 0x70, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0x80, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0x90, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0xa0, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0xb0, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0xc0, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0xd0, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0xe0, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(new byte[] { (byte) 0xf0, 0, 0, 0, 0, 0, 0, 0 });
  expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);

  // Do table creation/pre-splitting and verification of region boundaries
  preSplitTableAndVerify(expectedBounds, UniformSplit.class.getSimpleName(),
    TableName.valueOf(name.getMethodName()));
}
Example #4
Source File: DataJanitorStateTest.java From phoenix-tephra with Apache License 2.0
@Before
public void beforeTest() throws Exception {
  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  // Prune state table is a non-transactional table, hence no transaction co-processor
  HTable table = createTable(pruneStateTable.getName(), new byte[][]{DataJanitorState.FAMILY}, false,
                             Collections.<String>emptyList());
  table.close();

  connection = HConnectionManager.createConnection(conf);
  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public HTableInterface get() throws IOException {
        return connection.getTable(pruneStateTable);
      }
    });
}
Example #5
Source File: HBaseEntitySchemaManager.java From eagle with Apache License 2.0
private void createTable(EntityDefinition entityDefinition) throws IOException {
  String tableName = entityDefinition.getTable();
  if (admin.tableExists(tableName)) {
    LOG.info("Table {} already exists", tableName);
  } else {
    HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));

    // Adding column families to table descriptor
    HColumnDescriptor columnDescriptor = new HColumnDescriptor(entityDefinition.getColumnFamily());
    columnDescriptor.setBloomFilterType(BloomType.ROW);
    //columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
    columnDescriptor.setMaxVersions(DEFAULT_MAX_VERSIONS);
    tableDescriptor.addFamily(columnDescriptor);

    // Execute the table through admin
    admin.createTable(tableDescriptor);
    LOG.info("Successfully create Table {}", tableName);
  }
}
Example #6
Source File: ParallelResultScanner.java From pinpoint with Apache License 2.0
public ParallelResultScanner(TableName tableName, HbaseAccessor hbaseAccessor, ExecutorService executor,
                             Scan originalScan, AbstractRowKeyDistributor keyDistributor,
                             int numParallelThreads) throws IOException {
  if (hbaseAccessor == null) {
    throw new NullPointerException("hbaseAccessor");
  }
  if (executor == null) {
    throw new NullPointerException("executor");
  }
  if (originalScan == null) {
    throw new NullPointerException("originalScan");
  }
  this.keyDistributor = Objects.requireNonNull(keyDistributor, "keyDistributor");

  final ScanTaskConfig scanTaskConfig =
    new ScanTaskConfig(tableName, hbaseAccessor, keyDistributor, originalScan.getCaching());
  final Scan[] splitScans = splitScans(originalScan);

  this.scanTasks = createScanTasks(scanTaskConfig, splitScans, numParallelThreads);
  this.nextResults = new Result[scanTasks.size()];
  for (ScanTask scanTask : scanTasks) {
    executor.execute(scanTask);
  }
}
Example #7
Source File: TestHBase_1_1_2_ClientService.java From localization_nifi with Apache License 2.0
@Test(expected = IllegalArgumentException.class)
public void testScanWithInvalidFilter() throws InitializationException, IOException {
  final String tableName = "nifi";
  final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class);

  // Mock an HBase Table so we can verify the put operations later
  final Table table = Mockito.mock(Table.class);
  when(table.getName()).thenReturn(TableName.valueOf(tableName));

  // create the controller service and link it to the test processor
  final MockHBaseClientService service = configureHBaseClientService(runner, table);
  runner.assertValid(service);

  // perform a scan and verify the four rows were returned
  final CollectingResultHandler handler = new CollectingResultHandler();
  final HBaseClientService hBaseClientService = runner.getProcessContext()
      .getProperty(TestProcessor.HBASE_CLIENT_SERVICE)
      .asControllerService(HBaseClientService.class);

  // this should throw IllegalArgumentException
  final String filter = "this is not a filter";
  hBaseClientService.scan(tableName, new ArrayList<Column>(), filter, System.currentTimeMillis(), handler);
}
Example #8
Source File: TestTableInputFormat.java From hbase with Apache License 2.0
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(job);
    Table exampleTable = connection.getTable(TableName.valueOf(("exampleDeprecatedTable")));
    // mandatory
    initializeTable(connection, exampleTable.getName());
    byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter =
      new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
Example #9
Source File: IntegrationTestBigLinkedListWithVisibility.java From hbase with Apache License 2.0
private void createTable(Admin admin, TableName tableName, boolean setVersion, boolean acl)
    throws IOException {
  if (!admin.tableExists(tableName)) {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY_NAME);
    if (setVersion) {
      familyDescriptor.setMaxVersions(DEFAULT_TABLES_COUNT);
    }
    tableDescriptor.setColumnFamily(familyDescriptor);
    admin.createTable(tableDescriptor);
    if (acl) {
      LOG.info("Granting permissions for user " + USER.getShortName());
      Permission.Action[] actions = { Permission.Action.READ };
      try {
        AccessControlClient.grant(ConnectionFactory.createConnection(getConf()), tableName,
          USER.getShortName(), null, null, actions);
      } catch (Throwable e) {
        LOG.error(HBaseMarkers.FATAL,
          "Error in granting permission for the user " + USER.getShortName(), e);
        throw new IOException(e);
      }
    }
  }
}
Example #10
Source File: TestRegionReplicasWithModifyTable.java From hbase with Apache License 2.0
private static void enableReplicationByModification(final TableName tableName, boolean withReplica,
    int initialReplicaCount, int enableReplicaCount, int splitCount)
    throws IOException, InterruptedException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  if (withReplica) {
    htd.setRegionReplication(initialReplicaCount);
  }
  if (splitCount > 0) {
    byte[][] splits = getSplits(splitCount);
    table = HTU.createTable(htd, new byte[][] { f }, splits, new Configuration(HTU.getConfiguration()));
  } else {
    table = HTU.createTable(htd, new byte[][] { f }, (byte[][]) null, new Configuration(HTU.getConfiguration()));
  }
  HBaseTestingUtility.setReplicas(HTU.getAdmin(), table.getName(), enableReplicaCount);
}
Example #11
Source File: SchemaResource.java From hbase with Apache License 2.0
private Response update(final TableSchemaModel model, final boolean replace, final UriInfo uriInfo) {
  try {
    TableName name = TableName.valueOf(tableResource.getName());
    Admin admin = servlet.getAdmin();
    if (replace || !admin.tableExists(name)) {
      return replace(name, model, uriInfo, admin);
    } else {
      return update(name, model, uriInfo, admin);
    }
  } catch (Exception e) {
    servlet.getMetrics().incrementFailedPutRequests(1);

    // Avoid re-unwrapping the exception
    if (e instanceof WebApplicationException) {
      throw (WebApplicationException) e;
    }
    return processException(e);
  }
}
Example #12
Source File: StorageCleanJobHbaseUtilTest.java From kylin with Apache License 2.0
@Test
public void test() throws IOException {
  HBaseAdmin hBaseAdmin = mock(HBaseAdmin.class);
  HTableDescriptor[] hds = new HTableDescriptor[2];
  HTableDescriptor d1 = mock(HTableDescriptor.class);
  HTableDescriptor d2 = mock(HTableDescriptor.class);
  hds[0] = d1;
  hds[1] = d2;
  when(d1.getValue("KYLIN_HOST")).thenReturn("../examples/test_metadata/");
  when(d2.getValue("KYLIN_HOST")).thenReturn("../examples/test_metadata/");
  when(d1.getTableName()).thenReturn(TableName.valueOf("KYLIN_J9TE08D9IA"));
  String toBeDel = "to-be-del";
  when(d2.getTableName()).thenReturn(TableName.valueOf(toBeDel));
  when(hBaseAdmin.listTables("KYLIN_.*")).thenReturn(hds);
  when(hBaseAdmin.tableExists(toBeDel)).thenReturn(true);
  when(hBaseAdmin.isTableEnabled(toBeDel)).thenReturn(false);
  StorageCleanJobHbaseUtil.cleanUnusedHBaseTables(hBaseAdmin, true, 100000, 1);

  ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
  verify(hBaseAdmin).deleteTable(captor.capture());
  assertEquals(Lists.newArrayList(toBeDel), captor.getAllValues());
}
Example #13
Source File: HMaster.java From hbase with Apache License 2.0
/**
 * Check hbase:namespace table is assigned. If not, startup will hang looking for the ns table
 * <p/>
 * This is for rolling upgrading, later we will migrate the data in ns table to the ns family of
 * meta table. And if this is a new cluster, this method will return immediately as there will be
 * no namespace table/region.
 * @return True if namespace table is up/online.
 */
private boolean waitForNamespaceOnline() throws IOException {
  TableState nsTableState =
    MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME);
  if (nsTableState == null || nsTableState.isDisabled()) {
    // this means we have already migrated the data and disabled or deleted the namespace table,
    // or this is a new deploy which does not have a namespace table from the beginning.
    return true;
  }
  List<RegionInfo> ris =
    this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
  if (ris.isEmpty()) {
    // maybe this will not happen any more, but anyway, no harm to add a check here...
    return true;
  }
  // Else there are namespace regions up in meta. Ensure they are assigned before we go on.
  for (RegionInfo ri : ris) {
    if (!isRegionOnline(ri)) {
      return false;
    }
  }
  return true;
}
Example #14
Source File: TestScannersFromClientSide.java From hbase with Apache License 2.0
@Test
public void testReadExpiredDataForRawScan() throws IOException {
  TableName tableName = name.getTableName();
  long ts = System.currentTimeMillis() - 10000;
  byte[] value = Bytes.toBytes("expired");
  try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
    table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, ts, value));
    assertArrayEquals(value, table.get(new Get(ROW)).getValue(FAMILY, QUALIFIER));
    TEST_UTIL.getAdmin().modifyColumnFamily(tableName,
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY).setTimeToLive(5));
    try (ResultScanner scanner = table.getScanner(FAMILY)) {
      assertNull(scanner.next());
    }
    try (ResultScanner scanner = table.getScanner(new Scan().setRaw(true))) {
      assertArrayEquals(value, scanner.next().getValue(FAMILY, QUALIFIER));
      assertNull(scanner.next());
    }
  }
}
Example #15
Source File: HBaseSyncPostCommitter.java From phoenix-omid with Apache License 2.0
private void addShadowCell(HBaseCellId cell, HBaseTransaction tx, SettableFuture<Void> updateSCFuture,
                           Map<TableName, List<Mutation>> mutations) throws IOException, InterruptedException {
  Put put = new Put(cell.getRow());
  put.addColumn(cell.getFamily(),
      CellUtils.addShadowCellSuffixPrefix(cell.getQualifier(), 0, cell.getQualifier().length),
      cell.getTimestamp(),
      Bytes.toBytes(tx.getCommitTimestamp()));

  TableName table = cell.getTable().getHTable().getName();
  List<Mutation> tableMutations = mutations.get(table);
  if (tableMutations == null) {
    ArrayList<Mutation> newList = new ArrayList<>();
    newList.add(put);
    mutations.put(table, newList);
  } else {
    tableMutations.add(put);
    if (tableMutations.size() > MAX_BATCH_SIZE) {
      flushMutations(table, tableMutations);
      mutations.remove(table);
    }
  }
}
Example #16
Source File: TableInputFormat.java From hgraphdb with Apache License 2.0
@Override
protected void initialize(JobContext context) throws IOException {
  TableName tableName = TableName.valueOf(getConf().get(tablePropertyKey));
  try {
    if (isMock()) {
      initializeTable(MockConnectionFactory.createConnection(new Configuration(getConf())), tableName);
    } else {
      initializeTable(ConnectionFactory.createConnection(new Configuration(getConf())), tableName);
    }
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
  }
}
Example #17
Source File: BulkWriteChannelInvoker.java From spliceengine with GNU Affero General Public License v3.0
public BulkWritesResult invoke(BulkWrites write) throws IOException {
  TableName tableName = tableInfoFactory.getTableInfo(this.tableName);
  CoprocessorRpcChannel channel = channelFactory.newChannel(tableName, write.getRegionKey());
  boolean cacheCheck = false;
  try {
    SpliceMessage.SpliceIndexService service =
      ProtobufUtil.newServiceStub(SpliceMessage.SpliceIndexService.class, channel);
    SpliceMessage.BulkWriteRequest.Builder builder = SpliceMessage.BulkWriteRequest.newBuilder();
    byte[] requestBytes = compressor.compress(write);
    builder.setBytes(ZeroCopyLiteralByteString.wrap(requestBytes));
    SpliceMessage.BulkWriteRequest bwr = builder.build();

    BlockingRpcCallback<SpliceMessage.BulkWriteResponse> doneCallback = new BlockingRpcCallback<>();
    ServerRpcController controller = new ServerRpcController();
    service.bulkWrite(controller, bwr, doneCallback);
    if (controller.failed()) {
      IOException error = controller.getFailedOn();
      clearCacheIfNeeded(error);
      cacheCheck = true;
      if (error != null)
        throw pef.processRemoteException(error);
      else
        throw pef.fromErrorString(controller.errorText());
    }
    SpliceMessage.BulkWriteResponse bulkWriteResponse = doneCallback.get();
    byte[] bytes = bulkWriteResponse.getBytes().toByteArray();
    if (bytes == null || bytes.length <= 0) {
      Logger logger = Logger.getLogger(BulkWriteChannelInvoker.class);
      logger.error("zero-length bytes returned with a null error for encodedString: " +
        write.getBulkWrites().iterator().next().getEncodedStringName());
    }
    return compressor.decompress(bytes, BulkWritesResult.class);
  } catch (Exception e) {
    if (!cacheCheck) clearCacheIfNeeded(e);
    throw pef.processRemoteException(e);
  }
}
Example #18
Source File: HBaseParallelDecoder.java From geowave with Apache License 2.0
public HBaseScanner(
    final Connection connection,
    final TableName tableName,
    final Scan sourceScanner,
    final int partitionKeyLength) {
  this.connection = connection;
  this.tableName = tableName;
  this.sourceScanner = sourceScanner;
  this.partitionKeyLength = partitionKeyLength;
}
Example #19
Source File: TestFileArchiverNotifierImpl.java From hbase with Apache License 2.0
@Test
public void testParseOldNamespaceSnapshotSize() throws Exception {
  final Admin admin = TEST_UTIL.getAdmin();
  final TableName fakeQuotaTableName = TableName.valueOf(testName.getMethodName());
  final TableName tn = TableName.valueOf(testName.getMethodName() + "1");
  if (admin.tableExists(fakeQuotaTableName)) {
    admin.disableTable(fakeQuotaTableName);
    admin.deleteTable(fakeQuotaTableName);
  }
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(fakeQuotaTableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(QuotaTableUtil.QUOTA_FAMILY_USAGE))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(QuotaUtil.QUOTA_FAMILY_INFO)).build();
  admin.createTable(desc);

  final String ns = "";
  try (Table fakeQuotaTable = conn.getTable(fakeQuotaTableName)) {
    FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn);
    // Verify no record is treated as zero
    assertEquals(0, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns));

    // Set an explicit value of zero
    fakeQuotaTable.put(QuotaTableUtil.createPutForNamespaceSnapshotSize(ns, 0L));
    assertEquals(0, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns));

    // Set a non-zero value
    fakeQuotaTable.put(QuotaTableUtil.createPutForNamespaceSnapshotSize(ns, 1024L));
    assertEquals(1024L, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns));
  }
}
Example #20
Source File: HFileReplicator.java From hbase with Apache License 2.0
public Void replicate() throws IOException {
  // Copy all the hfiles to the local file system
  Map<String, Path> tableStagingDirsMap = copyHFilesToStagingDir();

  int maxRetries = conf.getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);

  for (Entry<String, Path> tableStagingDir : tableStagingDirsMap.entrySet()) {
    String tableNameString = tableStagingDir.getKey();
    Path stagingDir = tableStagingDir.getValue();
    TableName tableName = TableName.valueOf(tableNameString);

    // Prepare collection of queue of hfiles to be loaded(replicated)
    Deque<LoadQueueItem> queue = new LinkedList<>();
    BulkLoadHFilesTool.prepareHFileQueue(conf, connection, tableName, stagingDir, queue, false, false);

    if (queue.isEmpty()) {
      LOG.warn("Did not find any files to replicate in directory {}", stagingDir.toUri());
      return null;
    }

    fsDelegationToken.acquireDelegationToken(sinkFs);
    try {
      doBulkLoad(conf, tableName, stagingDir, queue, maxRetries);
    } finally {
      cleanup(stagingDir);
    }
  }
  return null;
}
Example #21
Source File: TestFSTableDescriptors.java From hbase with Apache License 2.0
@Test
public void testUpdates() throws IOException {
  final String name = "testUpdates";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Cleanup old tests if any detritus laying around.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
  htds.update(htd);
  htds.update(htd);
  htds.update(htd);
}
Example #22
Source File: MetricsRegionServer.java From hbase with Apache License 2.0
public void updateGet(TableName tn, long t) {
  if (tableMetrics != null && tn != null) {
    tableMetrics.updateGet(tn, t);
  }
  if (t > 1000) {
    serverSource.incrSlowGet();
  }
  serverSource.updateGet(t);
  userAggregate.updateGet(t);
}
Example #23
Source File: InvalidListPruneTest.java From phoenix-tephra with Apache License 2.0
@BeforeClass
public static void startMiniCluster() throws Exception {
  // Setup the configuration to start HBase cluster with the invalid list pruning enabled
  conf = HBaseConfiguration.create();
  conf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, true);
  // Flush prune data to table quickly, so that tests don't have to wait long to see updates
  conf.setLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, 0L);
  AbstractHBaseTableTest.startMiniCluster();

  TransactionStateStorage txStateStorage = new InMemoryTransactionStateStorage();
  TransactionManager txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();

  // Do some transactional data operations
  txDataTable1 = TableName.valueOf("invalidListPruneTestTable1");
  HTable hTable = createTable(txDataTable1.getName(), new byte[][]{family}, false,
                              Collections.singletonList(TestTransactionProcessor.class.getName()));
  try (TransactionAwareHTable txTable = new TransactionAwareHTable(hTable, TxConstants.ConflictDetection.ROW)) {
    TransactionContext txContext = new TransactionContext(new InMemoryTxSystemClient(txManager), txTable);
    txContext.start();
    for (int i = 0; i < MAX_ROWS; ++i) {
      txTable.put(new Put(Bytes.toBytes(i)).add(family, qualifier, Bytes.toBytes(i)));
    }
    txContext.finish();
  }

  testUtil.flush(txDataTable1);
  txManager.stopAndWait();

  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  connection = HConnectionManager.createConnection(conf);
  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public HTableInterface get() throws IOException {
        return connection.getTable(pruneStateTable);
      }
    });
}
Example #24
Source File: MasterCoprocessorHost.java From hbase with Apache License 2.0
/**
 * Invoked just before a split
 * @param tableName the table where the region belongs to
 * @param splitRow the split point
 * @param user the user
 * @throws IOException
 */
public void preSplitRegionAction(
    final TableName tableName,
    final byte[] splitRow,
    final User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
    @Override
    public void call(MasterObserver observer) throws IOException {
      observer.preSplitRegionAction(this, tableName, splitRow);
    }
  });
}
Example #25
Source File: TestMultiRowRangeFilter.java From hbase with Apache License 2.0
@Test
public void testReverseMultiRowRangeFilterWithinTable() throws IOException {
  tableName = TableName.valueOf(name.getMethodName());
  Table ht = TEST_UTIL.createTable(tableName, family);
  generateRows(numRows, ht, family, qf, value);

  Scan scan = new Scan();
  scan.setReversed(true);
  List<RowRange> ranges = Arrays.asList(
    new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(30), true),
    new RowRange(Bytes.toBytes(50), true, Bytes.toBytes(60), true)
  );
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);

  List<Integer> expectedResults = new ArrayList<>();
  for (int i = 60; i >= 50; i--) {
    expectedResults.add(i);
  }
  for (int i = 30; i >= 20; i--) {
    expectedResults.add(i);
  }

  List<Cell> results = getResults(ht, scan);
  List<Integer> actualResults = new ArrayList<>();
  StringBuilder sb = new StringBuilder();
  for (Cell result : results) {
    int observedValue = Bytes.toInt(
        result.getRowArray(), result.getRowOffset(), result.getRowLength());
    actualResults.add(observedValue);
    if (sb.length() > 0) {
      sb.append(", ");
    }
    sb.append(observedValue);
  }
  assertEquals("Saw results: " + sb.toString(), 22, results.size());
}
Example #26
Source File: TestAsyncTableRpcPriority.java From hbase with Apache License 2.0
@Test
public void testScan() throws IOException, InterruptedException {
  try (ResultScanner scanner = conn.getTable(TableName.valueOf(name.getMethodName()))
      .getScanner(new Scan().setCaching(1).setMaxResultSize(1).setPriority(19))) {
    assertNotNull(scanner.next());
    Thread.sleep(1000);
  }
  Thread.sleep(1000);
  // open, next, several renew lease, and then close
  verify(stub, atLeast(4)).scan(assertPriority(19), any(ScanRequest.class), any());
}
Example #27
Source File: SpaceQuotaHelperForTests.java From hbase with Apache License 2.0
/**
 * Removes all quotas defined in the HBase quota table.
 */
void removeAllQuotas(Connection conn) throws IOException {
  // Wait for the quota table to be created
  if (!conn.getAdmin().tableExists(QuotaUtil.QUOTA_TABLE_NAME)) {
    waitForQuotaTable(conn);
  } else {
    // Or, clean up any quotas from previous test runs.
    QuotaRetriever scanner = QuotaRetriever.open(conn.getConfiguration());
    try {
      for (QuotaSettings quotaSettings : scanner) {
        final String namespace = quotaSettings.getNamespace();
        final TableName tableName = quotaSettings.getTableName();
        final String userName = quotaSettings.getUserName();
        if (namespace != null) {
          LOG.debug("Deleting quota for namespace: " + namespace);
          QuotaUtil.deleteNamespaceQuota(conn, namespace);
        } else if (tableName != null) {
          LOG.debug("Deleting quota for table: " + tableName);
          QuotaUtil.deleteTableQuota(conn, tableName);
        } else if (userName != null) {
          LOG.debug("Deleting quota for user: " + userName);
          QuotaUtil.deleteUserQuota(conn, userName);
        }
      }
    } finally {
      if (scanner != null) {
        scanner.close();
      }
    }
  }
}
Example #28
Source File: TestReplicaWithCluster.java From hbase with Apache License 2.0
@Test
public void testReplicaGetWithPrimaryDown() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(TableName.valueOf("testCreateDeleteTable"),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(RegionServerStoppedCopro.class.getName());
  try {
    Table table = HTU.createTable(hdt, new byte[][] { f }, null);

    Put p = new Put(row);
    p.addColumn(f, row, row);
    table.put(p);

    // Flush so it can be picked by the replica refresher thread
    HTU.flush(table.getName());

    // Sleep for some time until data is picked up by replicas
    try {
      Thread.sleep(2 * REFRESH_PERIOD);
    } catch (InterruptedException e1) {
      LOG.error(e1.toString(), e1);
    }

    // But if we ask for stale we will get it
    Get g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    Result r = table.get(g);
    Assert.assertTrue(r.isStale());
  } finally {
    HTU.getAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
}
Example #29
Source File: TestCheckAndMutate.java From hbase with Apache License 2.0
private Table createTable() throws IOException, InterruptedException {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table table = TEST_UTIL.createTable(tableName, FAMILY);
  TEST_UTIL.waitTableAvailable(tableName.getName(), 5000);
  return table;
}
Example #30
Source File: PermissionStorage.java From hbase with Apache License 2.0
static private void removeTablePermissions(TableName tableName, byte[] column, Table table,
    boolean closeTable) throws IOException {
  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);

  String columnName = Bytes.toString(column);
  scan.setFilter(new QualifierFilter(CompareOperator.EQUAL,
      new RegexStringComparator(String.format("(%s%s%s)|(%s%s)$",
          ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, ACL_KEY_DELIMITER, columnName))));

  Set<byte[]> qualifierSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  ResultScanner scanner = null;
  try {
    scanner = table.getScanner(scan);
    for (Result res : scanner) {
      for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) {
        qualifierSet.add(q);
      }
    }

    if (qualifierSet.size() > 0) {
      Delete d = new Delete(tableName.getName());
      for (byte[] qualifier : qualifierSet) {
        d.addColumns(ACL_LIST_FAMILY, qualifier);
      }
      table.delete(d);
    }
  } finally {
    if (scanner != null) {
      scanner.close();
    }
    if (closeTable) {
      table.close();
    }
  }
}