org.apache.hadoop.hbase.client.Admin Java Examples

The following examples show how to use org.apache.hadoop.hbase.client.Admin. Each example is taken from an open source project; the source file, project, and license are listed above the code.
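
Before the project examples, here is a minimal usage sketch (not taken from any of the projects below; the table and column family names are placeholders, and standard HBase 2.x client imports are assumed). It shows the pattern most examples share: obtain an Admin from a Connection, use try-with-resources so both are closed, and issue administrative calls.

// Minimal sketch: obtain an Admin from a Connection and close both automatically.
public void createTableIfMissing(Configuration conf) throws IOException {
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Admin admin = connection.getAdmin()) {
    TableName tableName = TableName.valueOf("example_table");   // placeholder name
    if (!admin.tableExists(tableName)) {
      admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // placeholder family
          .build());
    }
  }
}
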
Example #1
Source File: HBaseOperations.java    From geowave with Apache License 2.0
@Override
public void deleteAll() throws IOException {
  try (Admin admin = conn.getAdmin()) {
    final TableName[] tableNamesArr = admin.listTableNames();
    for (final TableName tableName : tableNamesArr) {
      if ((tableNamespace == null) || tableName.getNameAsString().startsWith(tableNamespace)) {
        synchronized (ADMIN_MUTEX) {
          if (admin.tableExists(tableName)) {
            disableTable(admin, tableName);
            admin.deleteTable(tableName);
          }
        }
      }
    }
    synchronized (this) {
      iteratorsAttached = false;
    }
    cfCache.clear();
    partitionCache.clear();
    coprocessorCache.clear();
    DataAdapterAndIndexCache.getInstance(
        RowMergingAdapterOptionProvider.ROW_MERGING_ADAPTER_CACHE_ID,
        tableNamespace,
        HBaseStoreFactoryFamily.TYPE).deleteAll();
  }
}
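
Note that the example disables each table before deleting it: Admin.deleteTable fails if the table is still enabled. A minimal sketch of that disable-then-delete pattern, assuming an already-open Admin and an existing TableName:

// Sketch only: delete a table safely ('admin' and 'tableName' assumed to exist).
if (admin.tableExists(tableName)) {
  if (admin.isTableEnabled(tableName)) {
    admin.disableTable(tableName);   // deleteTable requires a disabled table
  }
  admin.deleteTable(tableName);
}
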
 
Example #2
Source File: SetPropertyIT.java    From phoenix with Apache License 2.0
@Test
public void testSetHTableProperties() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE " + dataTableFullName + " (\n"
            +"ID1 VARCHAR(15) NOT NULL,\n"
            +"ID2 VARCHAR(15) NOT NULL,\n"
            +"CREATED_DATE DATE,\n"
            +"CREATION_TIME BIGINT,\n"
            +"LAST_USED DATE,\n"
            +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) " + generateDDLOptions("SALT_BUCKETS = 8");
    Connection conn1 = DriverManager.getConnection(getUrl(), props);
    conn1.createStatement().execute(ddl);
    ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED=FALSE";
    conn1.createStatement().execute(ddl);
    try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
        assertEquals(1, tableDesc.getColumnFamilies().length);
        assertEquals("0", tableDesc.getColumnFamilies()[0].getNameAsString());
        assertEquals(Boolean.toString(false), tableDesc.getValue(TableDescriptorBuilder.COMPACTION_ENABLED));
    }
}
 
Example #3
Source File: TestCloneSnapshotProcedure.java    From hbase with Apache License 2.0
private SnapshotProtos.SnapshotDescription getSnapshot() throws Exception {
  if (snapshot == null) {
    final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot");
    long tid = System.currentTimeMillis();
    final String snapshotName = "snapshot-" + tid;

    Admin admin = UTIL.getAdmin();
    // create Table
    SnapshotTestingUtils.createTable(UTIL, snapshotTableName, getNumReplicas(), CF);
    // Load data
    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, 500, CF);
    admin.disableTable(snapshotTableName);
    // take a snapshot
    admin.snapshot(snapshotName, snapshotTableName);
    admin.enableTable(snapshotTableName);

    List<SnapshotDescription> snapshotList = admin.listSnapshots();
    snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0));
  }
  return snapshot;
}
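
The helper above only takes the snapshot. Cloning it into a new table is a single Admin call; a sketch, assuming the admin and snapshotName from this example are still in scope and using a placeholder target name:

// Sketch: clone the snapshot created above into a new (placeholder) table.
admin.cloneSnapshot(snapshotName, TableName.valueOf("testCloneSnapshot-clone"));
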
 
Example #4
Source File: HBaseTables.java    From presto-hbase-connector with Apache License 2.0
Set<String> getSchemaNames() {
    NamespaceDescriptor[] namespaceDescriptors = new NamespaceDescriptor[0];
    Admin admin = null;
    try {
        admin = hbaseClientManager.getAdmin();
        namespaceDescriptors = admin.listNamespaceDescriptors();
    } catch (IOException e) {
        logger.error(e, e.getMessage());
    } finally {
        if (admin != null) {
            hbaseClientManager.close(admin);
        }
    }

    HashSet<String> set = new HashSet<>();
    for (NamespaceDescriptor namespaceDescriptor : namespaceDescriptors) {
        set.add(namespaceDescriptor.getName());
    }

    return set;
}
 
Example #5
Source File: TestBulkLoadReplication.java    From hbase with Apache License 2.0
private static void startThirdCluster() throws Exception {
  LOG.info("Setup Zk to same one from UTIL1 and UTIL2");
  UTIL3.setZkCluster(UTIL1.getZkCluster());
  UTIL3.startMiniCluster(NUM_SLAVES1);

  TableDescriptor table = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName)
      .setMobEnabled(true)
      .setMobThreshold(4000)
      .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build();

  Connection connection3 = ConnectionFactory.createConnection(CONF3);
  try (Admin admin3 = connection3.getAdmin()) {
    admin3.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  UTIL3.waitUntilAllRegionsAssigned(tableName);
  htable3 = connection3.getTable(tableName);
}
 
Example #6
Source File: SetPropertyIT.java    From phoenix with Apache License 2.0
@Test
public void testSetHColumnProperties() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE " + dataTableFullName + " (\n"
            +"ID1 VARCHAR(15) NOT NULL,\n"
            +"ID2 VARCHAR(15) NOT NULL,\n"
            +"CREATED_DATE DATE,\n"
            +"CREATION_TIME BIGINT,\n"
            +"LAST_USED DATE,\n"
            +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) " + generateDDLOptions("SALT_BUCKETS = 8");
    Connection conn1 = DriverManager.getConnection(getUrl(), props);
    conn1.createStatement().execute(ddl);
    ddl = "ALTER TABLE " + dataTableFullName + " SET REPLICATION_SCOPE=1";
    conn1.createStatement().execute(ddl);
    try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
                .getColumnFamilies();
        assertEquals(1, columnFamilies.length);
        assertEquals("0", columnFamilies[0].getNameAsString());
        assertEquals(1, columnFamilies[0].getScope());
    }
}
 
Example #7
Source File: CanaryTool.java    From hbase with Apache License 2.0
private static List<Future<Void>> sniff(final Admin admin, final Sink sink,
    TableDescriptor tableDesc, ExecutorService executor, TaskType taskType,
    boolean rawScanEnabled, LongAdder rwLatency, boolean readAllCF) throws Exception {
  LOG.debug("Reading list of regions for table {}", tableDesc.getTableName());
  try (Table table = admin.getConnection().getTable(tableDesc.getTableName())) {
    List<RegionTask> tasks = new ArrayList<>();
    try (RegionLocator regionLocator =
             admin.getConnection().getRegionLocator(tableDesc.getTableName())) {
      for (HRegionLocation location: regionLocator.getAllRegionLocations()) {
        if (location == null) {
          LOG.warn("Null location");
          continue;
        }
        ServerName rs = location.getServerName();
        RegionInfo region = location.getRegion();
        tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink)sink,
            taskType, rawScanEnabled, rwLatency, readAllCF));
        Map<String, List<RegionTaskResult>> regionMap = ((RegionStdOutSink) sink).getRegionMap();
        regionMap.put(region.getRegionNameAsString(), new ArrayList<RegionTaskResult>());
      }
      return executor.invokeAll(tasks);
    }
  } catch (TableNotFoundException e) {
    return Collections.emptyList();
  }
}
 
Example #8
Source File: HBase_2_ClientService.java    From nifi with Apache License 2.0
/**
 * As of Apache NiFi 1.5.0, due to changes made to
 * {@link SecurityUtil#loginKerberos(Configuration, String, String)}, which is used by this
 * class to authenticate a principal with Kerberos, HBase controller services no longer
 * attempt relogins explicitly.  For more information, please read the documentation for
 * {@link SecurityUtil#loginKerberos(Configuration, String, String)}.
 * <p/>
 * In previous versions of NiFi, a {@link org.apache.nifi.hadoop.KerberosTicketRenewer} was started
 * when the HBase controller service was enabled.  The use of a separate thread to explicitly relogin could cause
 * race conditions with the implicit relogin attempts made by hadoop/HBase code on a thread that references the same
 * {@link UserGroupInformation} instance.  One of these threads could leave the
 * {@link javax.security.auth.Subject} in {@link UserGroupInformation} to be cleared or in an unexpected state
 * while the other thread is attempting to use the {@link javax.security.auth.Subject}, resulting in failed
 * authentication attempts that would leave the HBase controller service in an unrecoverable state.
 *
 * @see SecurityUtil#loginKerberos(Configuration, String, String)
 */
@OnEnabled
public void onEnabled(final ConfigurationContext context) throws InitializationException, IOException, InterruptedException {
    this.connection = createConnection(context);

    // connection check
    if (this.connection != null) {
        final Admin admin = this.connection.getAdmin();
        if (admin != null) {
            admin.listTableNames();

            final ClusterStatus clusterStatus = admin.getClusterStatus();
            if (clusterStatus != null) {
                final ServerName master = clusterStatus.getMaster();
                if (master != null) {
                    masterAddress = master.getHostAndPort();
                } else {
                    masterAddress = null;
                }
            }
        }
    }
}
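
Admin.getClusterStatus() used above is deprecated in HBase 2.x in favor of Admin.getClusterMetrics(). A sketch of the same master lookup with the newer API, assuming the same connection and masterAddress field as in the example:

// Sketch: equivalent master lookup via ClusterMetrics (HBase 2.x API).
try (Admin admin = connection.getAdmin()) {
  ServerName master = admin.getClusterMetrics().getMasterName();
  masterAddress = (master != null) ? master.getAddress().toString() : null;
}
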
 
Example #9
Source File: SnapshotScannerHDFSAclController.java    From hbase with Apache License 2.0
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException {
  if (!initialized) {
    return;
  }
  try (Admin admin = c.getEnvironment().getConnection().getAdmin()) {
    if (admin.tableExists(PermissionStorage.ACL_TABLE_NAME)) {
      // Check if acl table has 'm' CF, if not, add 'm' CF
      TableDescriptor tableDescriptor = admin.getDescriptor(PermissionStorage.ACL_TABLE_NAME);
      boolean containHdfsAclFamily = Arrays.stream(tableDescriptor.getColumnFamilies()).anyMatch(
        family -> Bytes.equals(family.getName(), SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY));
      if (!containHdfsAclFamily) {
        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor)
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build());
        admin.modifyTable(builder.build());
      }
      aclTableInitialized = true;
    } else {
      throw new TableNotFoundException("Table " + PermissionStorage.ACL_TABLE_NAME
          + " is not created yet. Please check if " + getClass().getName()
          + " is configured after " + AccessController.class.getName());
    }
  }
}
 
Example #10
Source File: SetPropertyIT.java    From phoenix with Apache License 2.0
@Test
public void testSetPropertyAndAddColumnForDefaultColumnFamily() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    String ddl = "CREATE TABLE " + dataTableFullName +
            "  (a_string varchar not null, col1 integer" +
            "  CONSTRAINT pk PRIMARY KEY (a_string)) " + tableDDLOptions;
    try {
        conn.createStatement().execute(ddl);
        conn.createStatement().execute("ALTER TABLE " + dataTableFullName + " ADD col2 integer IN_MEMORY=true");
        try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
                    .getColumnFamilies();
            assertEquals(1, columnFamilies.length);
            assertEquals("0", columnFamilies[0].getNameAsString());
            assertTrue(columnFamilies[0].isInMemory());
        }
    } finally {
        conn.close();
    }
}
 
Example #11
Source File: FlappingAlterTableIT.java    From phoenix with Apache License 2.0
@Test
public void testNewColumnFamilyInheritsTTLOfEmptyCF() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE " + dataTableFullName + " (\n"
            +"ID1 VARCHAR(15) NOT NULL,\n"
            +"ID2 VARCHAR(15) NOT NULL,\n"
            +"CREATED_DATE DATE,\n"
            +"CREATION_TIME BIGINT,\n"
            +"LAST_USED DATE,\n"
            +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) TTL = 1000";
    Connection conn1 = DriverManager.getConnection(getUrl(), props);
    conn1.createStatement().execute(ddl);
    ddl = "ALTER TABLE " + dataTableFullName + " ADD CF.STRING VARCHAR";
    conn1.createStatement().execute(ddl);
    try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
        ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        assertEquals(2, columnFamilies.length);
        assertEquals("0", columnFamilies[0].getNameAsString());
        assertEquals(1000, columnFamilies[0].getTimeToLive());
        assertEquals("CF", columnFamilies[1].getNameAsString());
        assertEquals(1000, columnFamilies[1].getTimeToLive());
    }
}
 
Example #12
Source File: TestMasterObserverToModifyTableSchema.java    From hbase with Apache License 2.0
@Test
public void testMasterObserverToModifyTableSchema() throws IOException {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLENAME);
  for (int i = 1; i <= 3; i++) {
    builder.setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf" + i)).setMaxVersions(i)
            .build());
  }
  try (Admin admin = UTIL.getAdmin()) {
    admin.createTable(builder.build());
    assertOneVersion(admin.getDescriptor(TABLENAME));

    builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1"))
        .setMaxVersions(Integer.MAX_VALUE).build());
    admin.modifyTable(builder.build());
    assertOneVersion(admin.getDescriptor(TABLENAME));
  }
}
 
Example #13
Source File: TestMasterQuotasObserver.java    From hbase with Apache License 2.0
@Test
public void testTableRPCQuotaRemoved() throws Exception {
  final Connection conn = TEST_UTIL.getConnection();
  final Admin admin = conn.getAdmin();
  final TableName tn = TableName.valueOf(testName.getMethodName());
  // Drop the table if it somehow exists
  if (admin.tableExists(tn)) {
    dropTable(admin, tn);
  }

  createTable(admin, tn);
  assertEquals(0, getThrottleQuotas());

  // Set RPC quota
  QuotaSettings settings =
      QuotaSettingsFactory.throttleTable(tn, ThrottleType.REQUEST_SIZE, 2L, TimeUnit.HOURS);
  admin.setQuota(settings);

  assertEquals(1, getThrottleQuotas());

  // Delete the table and observe the RPC quota being automatically deleted as well
  dropTable(admin, tn);
  assertEquals(0, getThrottleQuotas());
}
 
Example #14
Source File: TestAccessController2.java    From hbase with Apache License 2.0
@Test
public void testCreateTableWithGroupPermissions() throws Exception {
  grantGlobal(TEST_UTIL, TESTGROUP_1_NAME, Action.CREATE);
  try {
    AccessTestAction createAction = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
          new TableDescriptorBuilder.ModifyableTableDescriptor(testTable.getTableName());
        tableDescriptor.setColumnFamily(
          new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(TEST_FAMILY));
        try (Connection connection =
            ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
          try (Admin admin = connection.getAdmin()) {
            admin.createTable(tableDescriptor);
          }
        }
        return null;
      }
    };
    verifyAllowed(createAction, TESTGROUP1_USER1);
    verifyDenied(createAction, TESTGROUP2_USER1);
  } finally {
    revokeGlobal(TEST_UTIL, TESTGROUP_1_NAME, Action.CREATE);
  }
}
 
Example #15
Source File: TestDelegationTokenWithEncryption.java    From hbase with Apache License 2.0
@Test
public void testPutGetWithDelegationToken() throws Exception {
  TableName tableName = getTestTableName();
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("q");
  byte[] row = Bytes.toBytes("row");
  byte[] value = Bytes.toBytes("data");
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
    Admin admin = conn.getAdmin();
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
    admin.createTable(tableDescriptor);
    try (Table table = conn.getTable(tableName)) {
      table.put(new Put(row).addColumn(family, qualifier, value));
      Result result = table.get(new Get(row));
      assertArrayEquals(value, result.getValue(family, qualifier));
    }
  }
}
 
Example #16
Source File: HBaseRangerAuthorizationTest.java    From ranger with Apache License 2.0
@Test
public void testReadTablesAsProcessOwner() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "" + port);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");
    
    Connection conn = ConnectionFactory.createConnection(conf);
    Admin admin = conn.getAdmin();

    HTableDescriptor[] tableDescriptors = admin.listTables();
    for (HTableDescriptor desc : tableDescriptors) {
        LOG.info("Found table:[" + desc.getTableName().getNameAsString() + "]");
    }
    Assert.assertEquals(3, tableDescriptors.length);

    conn.close();
}
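
Admin.listTables() and HTableDescriptor are deprecated in HBase 2.x; the same enumeration can be written against TableDescriptor, as in this sketch (assuming the same admin and LOG as above):

// Sketch: list tables with the non-deprecated HBase 2.x API.
for (TableDescriptor desc : admin.listTableDescriptors()) {
    LOG.info("Found table:[" + desc.getTableName().getNameAsString() + "]");
}
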
 
Example #17
Source File: TestRegionOpen.java    From hbase with Apache License 2.0
@Test
public void testPriorityRegionIsOpenedWithSeparateThreadPool() throws Exception {
  final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName());
  ThreadPoolExecutor exec = getRS().getExecutorService()
      .getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION);
  long completed = exec.getCompletedTaskCount();

  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  tableDescriptor.setPriority(HConstants.HIGH_QOS);
  tableDescriptor.setColumnFamily(
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(
      HConstants.CATALOG_FAMILY));
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Admin admin = connection.getAdmin()) {
    admin.createTable(tableDescriptor);
  }

  assertEquals(completed + 1, exec.getCompletedTaskCount());
}
 
Example #18
Source File: CheckTableIT.java    From spliceengine with GNU Affero General Public License v3.0
public static void deleteFirstIndexRegion(SpliceWatcher spliceWatcher, Connection connection, String schemaName, String tableName, String indexName) throws Exception {
    SConfiguration config = HConfiguration.getConfiguration();
    HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
    Admin admin = testingUtility.getAdmin();

    // Delete the first region of the index (the one with an empty start key)
    long   conglomerateId = TableSplit.getConglomerateId(connection, schemaName, tableName, indexName);
    TableName iName = TableName.valueOf(config.getNamespace(),Long.toString(conglomerateId));
    List<RegionInfo> partitions = admin.getRegions(iName);
    for (RegionInfo partition : partitions) {
        byte[] startKey = partition.getStartKey();
        if (startKey.length == 0) {
            String encodedRegionName = partition.getEncodedName();
            spliceWatcher.execute(String.format("call syscs_util.delete_region('%s', '%s', '%s', '%s', false)",
                    schemaName, tableName, indexName, encodedRegionName));
            break;
        }
    }
}
 
Example #19
Source File: TestClusterScopeQuotaThrottle.java    From hbase with Apache License 2.0
@Test
public void testUserClusterScopeQuota() throws Exception {
  final Admin admin = TEST_UTIL.getAdmin();
  final String userName = User.getCurrent().getShortName();

  // Add 6req/min limit for read request in cluster scope
  admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.READ_NUMBER, 6,
    TimeUnit.MINUTES, QuotaScope.CLUSTER));
  // Add 6req/min limit for write request in machine scope
  admin.setQuota(
    QuotaSettingsFactory.throttleUser(userName, ThrottleType.WRITE_NUMBER, 6, TimeUnit.MINUTES));
  triggerUserCacheRefresh(TEST_UTIL, false, TABLE_NAMES);
  // should execute at most 6 write requests and at most 3 read requests
  assertEquals(6, doPuts(10, FAMILY, QUALIFIER, tables[0]));
  assertEquals(3, doGets(10, tables[0]));
  // Remove all the limits
  admin.setQuota(QuotaSettingsFactory.unthrottleUser(userName));
  triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAMES);
}
 
Example #20
Source File: SnapshotTestingUtils.java    From hbase with Apache License 2.0
/**
 * Confirm that the snapshot has no references files but only metadata.
 */
public static void confirmEmptySnapshotValid(
    SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
    byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
    throws IOException {
  ArrayList<byte[]> emptyTestFamilies = new ArrayList<>(1);
  emptyTestFamilies.add(testFamily);
  confirmSnapshotValid(snapshotDescriptor, tableName,
    null, emptyTestFamilies, rootDir, admin, fs);
}
 
Example #21
Source File: TestVerifyReplicationAdjunct.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TestReplicationBase.setUpBeforeClass();
  TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100)
                          .build()).build();
  Connection connection2 = ConnectionFactory.createConnection(CONF2);
  try (Admin admin2 = connection2.getAdmin()) {
    admin2.createTable(peerTable, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  htable3 = connection2.getTable(peerTableName);
}
 
Example #22
Source File: RegionSplitter.java    From hbase with Apache License 2.0
static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo,
        String[] columnFamilies, Configuration conf)
throws IOException, InterruptedException {
  final int splitCount = conf.getInt("split.count", 0);
  Preconditions.checkArgument(splitCount > 1, "Split count must be > 1");

  Preconditions.checkArgument(columnFamilies.length > 0,
      "Must specify at least one column family. ");
  LOG.debug("Creating table " + tableName + " with " + columnFamilies.length
      + " column families.  Presplitting to " + splitCount + " regions");

  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
  for (String cf : columnFamilies) {
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf));
  }
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    Admin admin = connection.getAdmin();
    try {
      Preconditions.checkArgument(!admin.tableExists(tableName),
        "Table already exists: " + tableName);
      admin.createTable(builder.build(), splitAlgo.split(splitCount));
    } finally {
      admin.close();
    }
    LOG.debug("Table created!  Waiting for regions to show online in META...");
    if (!conf.getBoolean("split.verify", true)) {
      // NOTE: createTable is synchronous on the table, but not on the regions
      int onlineRegions = 0;
      while (onlineRegions < splitCount) {
        onlineRegions = MetaTableAccessor.getRegionCount(connection, tableName);
        LOG.debug(onlineRegions + " of " + splitCount + " regions online...");
        if (onlineRegions < splitCount) {
          Thread.sleep(10 * 1000); // sleep
        }
      }
    }
    LOG.debug("Finished creating table with " + splitCount + " regions");
  }
}
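
The Admin in this example is closed in a finally block; because Admin implements Closeable, the same cleanup can be written with try-with-resources. A sketch of just the create-table step, assuming the surrounding connection, builder, splitAlgo, and splitCount:

// Sketch: same create-table call with automatic Admin cleanup.
try (Admin admin = connection.getAdmin()) {
  Preconditions.checkArgument(!admin.tableExists(tableName),
      "Table already exists: " + tableName);
  admin.createTable(builder.build(), splitAlgo.split(splitCount));
}
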
 
Example #23
Source File: TestCoprocessorMetrics.java    From hbase with Apache License 2.0
@Before
public void setup() throws IOException {
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
       Admin admin = connection.getAdmin()) {
    for (TableDescriptor htd : admin.listTableDescriptors()) {
      UTIL.deleteTable(htd.getTableName());
    }
  }
}
 
Example #24
Source File: TestRegionSizeUse.java    From hbase with Apache License 2.0
@Test
public void testBasicRegionSizeReports() throws Exception {
  final long bytesWritten = 5L * 1024L * 1024L; // 5MB
  final TableName tn = writeData(bytesWritten);
  LOG.debug("Data was written to HBase");
  final Admin admin = TEST_UTIL.getAdmin();
  // Push the data to disk.
  admin.flush(tn);
  LOG.debug("Data flushed to disk");
  // Get the final region distribution
  final List<RegionInfo> regions = TEST_UTIL.getAdmin().getRegions(tn);

  HMaster master = cluster.getMaster();
  MasterQuotaManager quotaManager = master.getMasterQuotaManager();
  Map<RegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes();
  // Wait until we get all of the region reports for our table
  // The table may split, so make sure we have at least as many as expected right after we
  // finished writing the data.
  int observedRegions = numRegionsForTable(tn, regionSizes);
  while (observedRegions < regions.size()) {
    LOG.debug("Expecting more regions. Saw " + observedRegions
        + " region sizes reported, expected at least " + regions.size());
    Thread.sleep(1000);
    regionSizes = quotaManager.snapshotRegionSizes();
    observedRegions = numRegionsForTable(tn, regionSizes);
  }

  LOG.debug("Observed region sizes by the HMaster: " + regionSizes);
  long totalRegionSize = 0L;
  for (Long regionSize : regionSizes.values()) {
    totalRegionSize += regionSize;
  }
  assertTrue("Expected region size report to exceed " + bytesWritten + ", but was "
      + totalRegionSize + ". RegionSizes=" + regionSizes, bytesWritten < totalRegionSize);
}
 
Example #25
Source File: TestGlobalReplicationThrottler.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1 = HBaseConfiguration.create();
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setLong("replication.source.sleepforretries", 100);
  // Each WAL is about 120 bytes
  conf1.setInt(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, REPLICATION_SOURCE_QUOTA);
  conf1.setLong("replication.source.per.peer.node.bandwidth", 100L);

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  new ZKWatcher(conf1, "cluster1", null, true);

  conf2 = new Configuration(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);
  new ZKWatcher(conf2, "cluster2", null, true);

  ReplicationPeerConfig rpc = new ReplicationPeerConfig();
  rpc.setClusterKey(utility2.getClusterKey());

  utility1.startMiniCluster();
  utility2.startMiniCluster();

  Admin admin1 = ConnectionFactory.createConnection(conf1).getAdmin();
  admin1.addReplicationPeer("peer1", rpc);
  admin1.addReplicationPeer("peer2", rpc);
  admin1.addReplicationPeer("peer3", rpc);
  numOfPeer = admin1.listReplicationPeers().size();
}
 
Example #26
Source File: TestHBaseMetaEdit.java    From hbase with Apache License 2.0
private TableDescriptor getMetaDescriptor() throws TableNotFoundException, IOException {
  Admin admin = UTIL.getAdmin();
  TableDescriptor get = admin.getDescriptor(TableName.META_TABLE_NAME);
  TableDescriptor list =
    admin.listTableDescriptors(true).stream().filter(td -> td.isMetaTable()).findAny().get();
  TableDescriptor listByName =
    admin.listTableDescriptors(Collections.singletonList(TableName.META_TABLE_NAME)).get(0);
  TableDescriptor listByNs =
    admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME).stream()
      .filter(td -> td.isMetaTable()).findAny().get();
  assertEquals(get, list);
  assertEquals(get, listByName);
  assertEquals(get, listByNs);
  return get;
}
 
Example #27
Source File: HbaseSessions.java    From hugegraph with Apache License 2.0
public void createNamespace() throws IOException {
    NamespaceDescriptor ns = NamespaceDescriptor.create(this.namespace)
                                                .build();
    try (Admin admin = this.hbase.getAdmin()) {
        admin.createNamespace(ns);
    }
}
 
Example #28
Source File: SetPropertyIT.java    From phoenix with Apache License 2.0
@Test
public void testSetHColumnPropertyAndAddColumnForNewCFForTableWithOnlyPKCols() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    try {
        String ddl = "create table " + dataTableFullName + " ("
                + " id char(1) NOT NULL,"
                + " col1 integer NOT NULL,"
                + " col2 bigint NOT NULL,"
                + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
                + " ) " + generateDDLOptions("TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'");
        conn.createStatement().execute(ddl);
        ddl = "ALTER TABLE " + dataTableFullName + " ADD NEWCF.COL3 INTEGER IN_MEMORY=true";
        conn.createStatement().execute(ddl);
        conn.commit();
        try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
            ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            assertEquals(2, columnFamilies.length);
            assertEquals("NEWCF", columnFamilies[0].getNameAsString());
            assertEquals(true, columnFamilies[0].isInMemory());
            assertEquals("XYZ", columnFamilies[1].getNameAsString());
            assertEquals(false, columnFamilies[1].isInMemory());
        }
    } finally {
        conn.close();
    }
}
 
Example #29
Source File: SetPropertyIT.java    From phoenix with Apache License 2.0
@Test
public void testSetPropertyAndAddColumnForNewAndExistingColumnFamily() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String ddl = "CREATE TABLE " + dataTableFullName + " "
            +
            "  (a_string varchar not null, col1 integer, CF1.col2 integer" +
            "  CONSTRAINT pk PRIMARY KEY (a_string)) " + tableDDLOptions;
    try {
        conn.createStatement().execute(ddl);
        conn.createStatement()
                .execute(
                        "ALTER TABLE "
                                + dataTableFullName
                                + " ADD col4 integer, CF1.col5 integer, CF2.col6 integer IN_MEMORY=true, REPLICATION_SCOPE=1, CF2.IN_MEMORY=false ");
        try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
                    .getColumnFamilies();
            assertEquals(3, columnFamilies.length);
            assertEquals("0", columnFamilies[0].getNameAsString());
            assertTrue(columnFamilies[0].isInMemory());
            assertEquals(1, columnFamilies[0].getScope());
            assertEquals("CF1", columnFamilies[1].getNameAsString());
            assertTrue(columnFamilies[1].isInMemory());
            assertEquals(1, columnFamilies[1].getScope());
            assertEquals("CF2", columnFamilies[2].getNameAsString());
            assertFalse(columnFamilies[2].isInMemory());
            assertEquals(1, columnFamilies[2].getScope());
        }
    } finally {
        conn.close();
    }
}
 
Example #30
Source File: CustomSaslAuthenticationProviderTestBase.java    From hbase with Apache License 2.0
private void createTable() throws Exception {
  tableName = name.getTableName();

  // Create a table and write a record as the service user (hbase)
  UserGroupInformation serviceUgi = UserGroupInformation
    .loginUserFromKeytabAndReturnUGI("hbase/localhost", KEYTAB_FILE.getAbsolutePath());
  clusterId = serviceUgi.doAs(new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(CONF);
        Admin admin = conn.getAdmin();) {
        admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")).build());

        UTIL.waitTableAvailable(tableName);

        try (Table t = conn.getTable(tableName)) {
          Put p = new Put(Bytes.toBytes("r1"));
          p.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("1"));
          t.put(p);
        }

        return admin.getClusterMetrics().getClusterId();
      }
    }
  });
  assertNotNull(clusterId);
}