Java Code Examples for org.apache.hadoop.hbase.TableName#valueOf()

The following examples show how to use org.apache.hadoop.hbase.TableName#valueOf(). The source project and license are noted above each example.
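Before the project examples, here is a minimal sketch of the common valueOf() overloads. The table and namespace names ("my_ns", "my_table") are placeholders chosen for illustration, not names taken from the projects below.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class TableNameValueOfSketch {
  static void overloads() {
    // Placeholder names, for illustration only.
    TableName plain     = TableName.valueOf("my_table");                // qualifier only, default namespace
    TableName qualified = TableName.valueOf("my_ns:my_table");          // "namespace:qualifier" in a single string
    TableName twoPart   = TableName.valueOf("my_ns", "my_table");       // namespace and qualifier passed separately
    TableName fromBytes = TableName.valueOf(Bytes.toBytes("my_table")); // byte[] variant
  }
}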
Example 1
Source File: TestSpaceLimitSettings.java    From hbase with Apache License 2.0
@Test
public void testQuotaMerging() throws IOException {
  TableName tn = TableName.valueOf("foo");
  QuotaSettings originalSettings = QuotaSettingsFactory.limitTableSpace(
      tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE);
  QuotaSettings largerSizeLimit = QuotaSettingsFactory.limitTableSpace(
      tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE);
  QuotaSettings differentPolicy = QuotaSettingsFactory.limitTableSpace(
      tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES);
  QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace(
      "ns1", 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES);

  assertEquals(originalSettings.merge(largerSizeLimit), largerSizeLimit);
  assertEquals(originalSettings.merge(differentPolicy), differentPolicy);
  try {
    originalSettings.merge(incompatibleSettings);
    fail("Should not be able to merge a Table space quota with a namespace space quota.");
  } catch (IllegalArgumentException e) {
    //pass
  }
}
 
Example 2
Source File: TestSnapshotScannerHDFSAclController.java    From hbase with Apache License 2.0
@Test
public void testRevokeGlobal1() throws Exception {
  final String grantUserName = name.getMethodName();
  User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
  String namespace = name.getMethodName();
  TableName table1 = TableName.valueOf(namespace, name.getMethodName());
  String snapshot1 = namespace + "t1";

  TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
  snapshotAndWait(snapshot1, table1);
  SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
  SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
  TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
  assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
  checkUserAclEntry(FS, helper.getGlobalRootPaths(), grantUserName, false, false);
  deleteTable(table1);
}
 
Example 3
Source File: TestDisableTableProcedure.java    From hbase with Apache License 2.0
@Test
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  final byte[][] splitKeys = new byte[][] {
    Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
  };
  MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2");

  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Start the Disable procedure && kill the executor
  long procId = procExec.submitProcedure(
    new DisableTableProcedure(procExec.getEnvironment(), tableName, false));

  // Restart the executor and execute the step twice
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);

  MasterProcedureTestingUtility.validateTableIsDisabled(getMaster(), tableName);
}
 
Example 4
Source File: GuidePostsCacheWrapperTest.java    From phoenix with Apache License 2.0
@Test
public void invalidateAllTableDescriptor() {
    Set<byte[]> cfSet = new HashSet<>();
    cfSet.add(columnFamily1);
    cfSet.add(columnFamily2);

    TableDescriptor tableDesc = Mockito.mock(TableDescriptor.class);
    TableName tableName = TableName.valueOf(table);

    Mockito.when(tableDesc.getColumnFamilyNames()).thenReturn(cfSet);
    Mockito.when(tableDesc.getTableName()).thenReturn(tableName);

    wrapper.invalidateAll(tableDesc);
    Mockito.verify(cache, Mockito.times(1)).invalidate(new GuidePostsKey(table, columnFamily1));
    Mockito.verify(cache, Mockito.times(1)).invalidate(new GuidePostsKey(table, columnFamily2));
}
 
Example 5
Source File: AbstractHBaseTableTest.java    From phoenix-tephra with Apache License 2.0
protected static HTable createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    desc.addFamily(columnDesc);
  }
  if (existingData) {
    desc.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Give each coprocessor a distinct, increasing priority starting just above PRIORITY_USER
  int priority = Coprocessor.PRIORITY_USER;
  // order in list is the same order that coprocessors will be invoked
  for (String coprocessor : coprocessors) {
    desc.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return new HTable(testUtil.getConfiguration(), tableName);
}
 
Example 6
Source File: TestFilterListOnMini.java    From hbase with Apache License 2.0
@Test
public void testFiltersWithOR() throws Exception {
  TableName tn = TableName.valueOf(name.getMethodName());
  Table table = TEST_UTIL.createTable(tn, new String[] { "cf1", "cf2" });
  byte[] CF1 = Bytes.toBytes("cf1");
  byte[] CF2 = Bytes.toBytes("cf2");
  Put put1 = new Put(Bytes.toBytes("0"));
  put1.addColumn(CF1, Bytes.toBytes("col_a"), Bytes.toBytes(0));
  table.put(put1);
  Put put2 = new Put(Bytes.toBytes("0"));
  put2.addColumn(CF2, Bytes.toBytes("col_b"), Bytes.toBytes(0));
  table.put(put2);
  FamilyFilter filterCF1 =
      new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(CF1));
  FamilyFilter filterCF2 =
      new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(CF2));
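  // MUST_PASS_ONE combines the two family filters with a logical OR, so a cell matching either family passes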
  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE);
  filterList.addFilter(filterCF1);
  filterList.addFilter(filterCF2);
  Scan scan = new Scan();
  scan.setFilter(filterList);
  ResultScanner scanner = table.getScanner(scan);
  LOG.info("Filter list: " + filterList);
  for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
    Assert.assertEquals(2, rr.size());
  }
}
 
Example 7
Source File: HBaseStreamWriteMain.java    From flink-learning with Apache License 2.0
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    connection = ConnectionFactory.createConnection(configuration);
    TableName tableName = TableName.valueOf(ExecutionEnvUtil.PARAMETER_TOOL.get(HBASE_TABLE_NAME));
    Admin admin = connection.getAdmin();
    if (!admin.tableExists(tableName)) { // check whether the table exists; if it does not, create it
        log.info("============== table does not exist = {}", tableName);
        admin.createTable(new HTableDescriptor(TableName.valueOf(ExecutionEnvUtil.PARAMETER_TOOL.get(HBASE_TABLE_NAME)))
                .addFamily(new HColumnDescriptor(ExecutionEnvUtil.PARAMETER_TOOL.get(HBASE_COLUMN_NAME))));
    }
    table = connection.getTable(tableName);

    this.taskNumber = String.valueOf(taskNumber);
}
 
Example 8
Source File: CubeMigrationCrossClusterCLI.java    From kylin with Apache License 2.0
private boolean checkHTableExist(CubeSegment segment) throws IOException {
    String tableName = segment.getStorageLocationIdentifier();
    TableName htableName = TableName.valueOf(tableName);
    if (!dstCluster.checkExist(htableName, segment)) {
        return false;
    }

    if (!checkHTableEquals(tableName)) {
        logger.warn("although htable {} exists in destination, the details data are different", tableName);
        dstCluster.deleteHTable(tableName);
        return false;
    }
    return true;
}
 
Example 9
Source File: TestCreateTableProcedure.java    From hbase with Apache License 2.0
@Test
public void testSimpleCreateWithSplits() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final byte[][] splitKeys = new byte[][] {
    Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
  };
  testSimpleCreate(tableName, splitKeys);
}
 
Example 10
Source File: TestActivePolicyEnforcement.java    From hbase with Apache License 2.0
@Test
public void testNoQuotaReturnsSingletonPolicyEnforcement() {
  final ActivePolicyEnforcement ape = new ActivePolicyEnforcement(
      Collections.emptyMap(), Collections.emptyMap(), rss);
  final TableName tableName = TableName.valueOf("my_table");
  SpaceViolationPolicyEnforcement policyEnforcement = ape.getPolicyEnforcement(tableName);
  // This should be the same exact instance, the singleton
  assertTrue(policyEnforcement == MissingSnapshotViolationPolicyEnforcement.getInstance());
  assertEquals(1, ape.getLocallyCachedPolicies().size());
  Entry<TableName,SpaceViolationPolicyEnforcement> entry =
      ape.getLocallyCachedPolicies().entrySet().iterator().next();
  assertTrue(policyEnforcement == entry.getValue());
}
 
Example 11
Source File: TestMultiRowRangeFilter.java    From hbase with Apache License 2.0
@Test
public void testMultiRowRangeWithFilterListOrOperator() throws IOException {
  tableName = TableName.valueOf(name.getMethodName());
  Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);

  Scan scan = new Scan();
  scan.readAllVersions();

  List<RowRange> ranges1 = new ArrayList<>();
  ranges1.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  ranges1.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges1.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));

  MultiRowRangeFilter filter1 = new MultiRowRangeFilter(ranges1);

  List<RowRange> ranges2 = new ArrayList<>();
  ranges2.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(40), false));
  ranges2.add(new RowRange(Bytes.toBytes(80), true, Bytes.toBytes(90), false));

  MultiRowRangeFilter filter2 = new MultiRowRangeFilter(ranges2);

  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE);
  filterList.addFilter(filter1);
  filterList.addFilter(filter2);
  scan.setFilter(filterList);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), ht);
  List<Cell> results3 = getScanResult(Bytes.toBytes(80), Bytes.toBytes(90), ht);

  assertEquals(results1.size() + results2.size() + results3.size(),resultsSize);

  ht.close();
}
 
Example 12
Source File: TableInputFormat.java    From hbase with Apache License 2.0
@Override
protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
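  // When a split table is configured, use that table's region boundaries as the split points instead of this table's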
  if (conf.get(SPLIT_TABLE) != null) {
    TableName splitTableName = TableName.valueOf(conf.get(SPLIT_TABLE));
    try (Connection conn = ConnectionFactory.createConnection(getConf())) {
      try (RegionLocator rl = conn.getRegionLocator(splitTableName)) {
        return rl.getStartEndKeys();
      }
    }
  }

  return super.getStartEndKeys();
}
 
Example 13
Source File: HBaseTableInfoFactory.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public TableName getTableInfo(byte[] name) {
    return TableName.valueOf(namespaceBytes,name);
}
 
Example 14
Source File: TestWALReaderOnSecureWAL.java    From hbase with Apache License 2.0
private Path writeWAL(final WALFactory wals, final String tblName, boolean offheap)
    throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  String clsName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
  conf.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, SecureWALCellCodec.class,
    WALCellCodec.class);
  try {
    TableName tableName = TableName.valueOf(tblName);
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(tableName.getName(), 0);
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
    final int total = 10;
    final byte[] row = Bytes.toBytes("row");
    final byte[] family = Bytes.toBytes("family");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    // Write the WAL
    WAL wal = wals.getWAL(regionInfo);
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      KeyValue kv = new KeyValue(row, family, Bytes.toBytes(i), value);
      if (offheap) {
        ByteBuffer bb = ByteBuffer.allocateDirect(kv.getBuffer().length);
        bb.put(kv.getBuffer());
        ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(bb, 0, kv.getLength());
        kvs.add(offheapKV);
      } else {
        kvs.add(kv);
      }
      wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis(), mvcc, scopes), kvs);
    }
    wal.sync();
    final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal);
    wal.shutdown();

    return walPath;
  } finally {
    // restore the cell codec class
    conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, clsName);
  }
}
 
Example 15
Source File: TestVisibilityLabelsWithDeletes.java    From hbase with Apache License 2.0
@Test
public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersions() throws Exception {
  setAuths();
  final TableName tableName = TableName.valueOf(testName.getMethodName());
  try (Table table = doPuts(tableName)) {
    TEST_UTIL.getAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(
              "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")"));
          d.addColumns(fam, qual, 125L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);

    TEST_UTIL.getAdmin().flush(tableName);
    Scan s = new Scan();
    s.readVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 2);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
      row1, 0, row1.length));
    assertEquals(127L, current.getTimestamp());
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
      row1, 0, row1.length));
    assertEquals(126L, current.getTimestamp());
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
      row1, 0, row1.length));
    assertEquals(125L, current.getTimestamp());
    cellScanner = next[1].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
      row2, 0, row2.length));
  }
}
 
Example 16
Source File: SelfInsertSparkIT.java    From spliceengine with GNU Affero General Public License v3.0
@Test(timeout = 300000)
public void testCountMatchesAfterSplit() throws Throwable {
    int maxLevel = 20;
    // block flushes
    assertTrue(HBaseTestUtils.setBlockPreFlush(true));
    try(PreparedStatement ps = methodWatcher.prepareStatement("select count(*) from foo")){
        try(Statement s =methodWatcher.getOrCreateConnection().createStatement()){
            String sql = "insert into foo (col1, col2) values (0,'234234324324sdfjkjdfsjksdjkfjkjksdjkfjksdjkfjkjksdjkfjksdkjfkjkjsdkjfkjsjkdjkfjksdjkfkjskjdkjfkjskjdjkfjksdjkjkfjksjkdf')";
            int updateCount = s.executeUpdate(sql);
            Assert.assertEquals("Incorrect update count!",1,updateCount);
            try(ResultSet rs = ps.executeQuery()){
                Assert.assertTrue("No rows returned from count query!",rs.next());
                Assert.assertEquals("Incorrect table size!",1l,rs.getLong(1));
            }

            for(int i=0;i<maxLevel;i++){
                long newSize = 1L<<i;
                LOG.trace("inserting "+newSize+" records");
                sql = "insert into foo select col1+"+newSize+", col2 from foo";
                updateCount = s.executeUpdate(sql);
                Assert.assertEquals("Incorrect reported update count!",newSize,updateCount);
                try(ResultSet rs = ps.executeQuery()){
                    Assert.assertTrue("No rows returned from count query!",rs.next());
                    Assert.assertEquals("Incorrect table count!",newSize<<1,rs.getLong(1));
                }
            }
        }
    }

    // unblock flushes
    assertTrue(HBaseTestUtils.setBlockPreFlush(false));

    final long expectedRows = 1L<<maxLevel;
    // flush table
    LOG.trace("Flushing table");
    String conglomerateNumber = TestUtils.lookupConglomerateNumber(CLASS_NAME, "foo", methodWatcher);
    TableName tableName = TableName.valueOf("splice", conglomerateNumber);
    try (Admin admin = connection.getAdmin()) {
        admin.flush(tableName);

        Thread.sleep(5000); // let it flush

        // block compactions
        assertTrue(HBaseTestUtils.setBlockPreCompact(true));

        LOG.trace("Splitting table");
        admin.split(tableName);
        LOG.trace("Waiting for split");
        while (admin.getTableRegions(tableName).size() < 2) {
            Thread.sleep(1000); // wait for split to complete
            admin.split(tableName); // just in case
        }
        LOG.trace("Split visible");

        try (PreparedStatement ps = methodWatcher.prepareStatement("select count(*) from foo --splice-properties useSpark=true")) {
            try (ResultSet rs = ps.executeQuery()) {
                Assert.assertTrue("No rows returned from count query!", rs.next());
                LOG.trace("Got result " + rs.getLong(1));
                Assert.assertEquals("Incorrect table count!", expectedRows, rs.getLong(1));
            }
        }

        // unblock compactions
        assertTrue(HBaseTestUtils.setBlockPreCompact(false));
    }
}
 
Example 17
Source File: IntegrationTestBigLinkedList.java    From hbase with Apache License 2.0
static TableName getTableName(Configuration conf) {
  return TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
}
 
Example 18
Source File: DropTableWithViewsIT.java    From phoenix with Apache License 2.0
@Test
public void testDropTableWithChildViews() throws Exception {
    String baseTable = SchemaUtil.getTableName(SCHEMA1, generateUniqueName());
    try (Connection conn = DriverManager.getConnection(getUrl());
            Connection viewConn =
                    isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn) {
        // Empty the task table first.
        conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME);

        String ddlFormat =
                "CREATE TABLE IF NOT EXISTS " + baseTable + "  ("
                        + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
                        + " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
        conn.createStatement().execute(generateDDL(ddlFormat));
        conn.commit();
        // Create a view tree (i.e., tree of views) with depth of 2 and fanout factor of 4
        for (int  i = 0; i < 4; i++) {
            String childView = SchemaUtil.getTableName(SCHEMA2, generateUniqueName());
            String childViewDDL = "CREATE VIEW " + childView + " AS SELECT * FROM " + baseTable;
            viewConn.createStatement().execute(childViewDDL);
            for (int j = 0; j < 4; j++) {
                String grandChildView = SchemaUtil.getTableName(SCHEMA2, generateUniqueName());
                String grandChildViewDDL = "CREATE VIEW " + grandChildView + " AS SELECT * FROM " + childView;
                viewConn.createStatement().execute(grandChildViewDDL);
            }
        }
        // Drop the base table
        String dropTable = String.format("DROP TABLE IF EXISTS %s CASCADE", baseTable);
        conn.createStatement().execute(dropTable);
        // Run DropChildViewsTask to complete the tasks for dropping child views. The depth of the view tree is 2,
        // so we expect that this will be done in two task handling runs as each non-root level will be processed
        // in one run
        TaskRegionObserver.SelfHealingTask task =
                new TaskRegionObserver.SelfHealingTask(
                        TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
        task.run();
        task.run();

        assertTaskColumns(conn, PTable.TaskStatus.COMPLETED.toString(), PTable.TaskType.DROP_CHILD_VIEWS,
                null, null, null, null, null);

        // Views should be dropped by now
        TableName linkTable = TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES);
        TableViewFinderResult childViewsResult = new TableViewFinderResult();
        ViewUtil.findAllRelatives(getUtility().getConnection().getTable(linkTable),
                HConstants.EMPTY_BYTE_ARRAY,
                SchemaUtil.getSchemaNameFromFullName(baseTable).getBytes(),
                SchemaUtil.getTableNameFromFullName(baseTable).getBytes(),
                PTable.LinkType.CHILD_TABLE,
                childViewsResult);
        assertTrue(childViewsResult.getLinks().size() == 0);
        // There should not be any orphan views
        ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME +
                " WHERE " + PhoenixDatabaseMetaData.TABLE_SCHEM + " = '" + SCHEMA2 +"'");
        assertFalse(rs.next());
    }
}
 
Example 19
Source File: TestAccessController.java    From hbase with Apache License 2.0
@Test
public void testAccessControllerUserPermsRegexHandling() throws Exception {
  User testRegexHandler = User.createUserForTesting(conf, "testRegexHandling", new String[0]);

  final String REGEX_ALL_TABLES = ".*";
  final String tableName = name.getMethodName();
  final TableName table1 = TableName.valueOf(tableName);
  final byte[] family = Bytes.toBytes("f1");

  // create table in default ns
  Admin admin = TEST_UTIL.getAdmin();
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(table1);
  tableDescriptor.setColumnFamily(
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
  createTable(TEST_UTIL, tableDescriptor);

  // creating the ns and table in it
  String ns = "testNamespace";
  NamespaceDescriptor desc = NamespaceDescriptor.create(ns).build();
  final TableName table2 = TableName.valueOf(ns, tableName);
  createNamespace(TEST_UTIL, desc);
  tableDescriptor = new TableDescriptorBuilder.ModifyableTableDescriptor(table2);
  tableDescriptor.setColumnFamily(
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
  createTable(TEST_UTIL, tableDescriptor);

  // Verify that we can read sys-tables
  String aclTableName = PermissionStorage.ACL_TABLE_NAME.getNameAsString();
  assertEquals(5, SUPERUSER.runAs(getPrivilegedAction(aclTableName)).size());
  assertEquals(0, testRegexHandler.runAs(getPrivilegedAction(aclTableName)).size());

  // Grant TABLE ADMIN privs to testUserPerms
  assertEquals(0, testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size());
  grantOnTable(TEST_UTIL, testRegexHandler.getShortName(), table1, null, null, Action.ADMIN);
  assertEquals(2, testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size());
  grantOnTable(TEST_UTIL, testRegexHandler.getShortName(), table2, null, null, Action.ADMIN);
  assertEquals(4, testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size());

  // USER_ADMIN, testUserPerms must have a row each.
  assertEquals(2, testRegexHandler.runAs(getPrivilegedAction(tableName)).size());
  assertEquals(2, testRegexHandler.runAs(getPrivilegedAction(
        NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + TableName.NAMESPACE_DELIM + tableName)
      ).size());
  assertEquals(2, testRegexHandler.runAs(getPrivilegedAction(
      ns + TableName.NAMESPACE_DELIM + tableName)).size());
  assertEquals(0, testRegexHandler.runAs(getPrivilegedAction("notMatchingAny")).size());

  deleteTable(TEST_UTIL, table1);
  deleteTable(TEST_UTIL, table2);
  deleteNamespace(TEST_UTIL, ns);
}
 
Example 20
Source File: HBaseCompat0_98.java    From incubator-atlas with Apache License 2.0
@Override
public HTableDescriptor newTableDescriptor(String tableName) {
    TableName tn = TableName.valueOf(tableName);
    return new HTableDescriptor(tn);
}