Java Code Examples for org.apache.hadoop.hbase.client.Admin#enableTable()

The following examples show how to use org.apache.hadoop.hbase.client.Admin#enableTable(). Each example comes from an open-source project; the source file and originating project are noted in the heading above each snippet.
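As a starting point, the sketch below shows the basic pattern most of these examples build on: obtain an Admin from a Connection, guard the call with isTableEnabled(), and enable the table. This is a minimal sketch assuming a reachable HBase cluster; the table name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class EnableTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName tn = TableName.valueOf("my_table"); // illustrative table name
            // enableTable() throws TableNotDisabledException if the table is not
            // currently disabled, so check the state first.
            if (!admin.isTableEnabled(tn)) {
                admin.enableTable(tn);
            }
        }
    }
}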
Example 1
Source File: HtableAlterMetadataCLI.java    From kylin-on-parquet-v2 with Apache License 2.0
private void alter() throws IOException {
    Connection conn = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl());
    Admin hbaseAdmin = null;

    try {
        hbaseAdmin = conn.getAdmin();
        HTableDescriptor table = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));

        hbaseAdmin.disableTable(table.getTableName());
        table.setValue(metadataKey, metadataValue);
        hbaseAdmin.modifyTable(table.getTableName(), table);
        hbaseAdmin.enableTable(table.getTableName());
    } finally {
        if (hbaseAdmin != null) {
            hbaseAdmin.close();
        }
    }
}
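Note that getTableDescriptor() and the two-argument modifyTable() used above are deprecated in HBase 2.x. A sketch of the same disable/modify/enable cycle against the replacement API (assuming the same hbaseAdmin, tableName, metadataKey and metadataValue as above) could look like this:

TableName tn = TableName.valueOf(tableName);
TableDescriptor current = hbaseAdmin.getDescriptor(tn);
// copy the descriptor, apply the new metadata value, and swap it in
TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        .setValue(metadataKey, metadataValue)
        .build();
hbaseAdmin.disableTable(tn);
hbaseAdmin.modifyTable(updated);
hbaseAdmin.enableTable(tn);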
 
Example 2
Source File: IndexUpgradeTool.java    From phoenix with Apache License 2.0
private void enableTable(Admin admin, String dataTable, Set<String> indexes)
        throws IOException {
    if (!admin.isTableEnabled(TableName.valueOf(dataTable))) {
        if (!dryRun) {
            admin.enableTable(TableName.valueOf(dataTable));
        }
        LOGGER.info("Enabled data table " + dataTable);
    } else {
        LOGGER.info("Data table " + dataTable + " is already enabled");
    }
    for (String indexName : indexes) {
        if (!admin.isTableEnabled(TableName.valueOf(indexName))) {
            if (!dryRun) {
                admin.enableTable(TableName.valueOf(indexName));
            }
            LOGGER.info("Enabled index table " + indexName);
        } else {
            LOGGER.info("Index table " + indexName + " is already enabled");
        }
    }
}
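Admin#enableTable() throws TableNotDisabledException if the table is not currently disabled, so the isTableEnabled() guards above are what make this upgrade step safe to re-run; the dryRun flag additionally lets the tool log the intended changes without touching table state.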
 
Example 3
Source File: TestCloneSnapshotProcedure.java    From hbase with Apache License 2.0
private SnapshotProtos.SnapshotDescription getSnapshot() throws Exception {
  if (snapshot == null) {
    final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot");
    long tid = System.currentTimeMillis();
    final String snapshotName = "snapshot-" + tid;

    Admin admin = UTIL.getAdmin();
    // create Table
    SnapshotTestingUtils.createTable(UTIL, snapshotTableName, getNumReplicas(), CF);
    // Load data
    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, 500, CF);
    admin.disableTable(snapshotTableName);
    // take a snapshot
    admin.snapshot(snapshotName, snapshotTableName);
    admin.enableTable(snapshotTableName);

    List<SnapshotDescription> snapshotList = admin.listSnapshots();
    snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0));
  }
  return snapshot;
}
 
Example 4
Source File: TestSpaceQuotaBasicFunctioning.java    From hbase with Apache License 2.0
@Test
public void testNoEnableAfterDisablePolicy() throws Exception {
  Put p = new Put(Bytes.toBytes("to_reject"));
  p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
      Bytes.toBytes("reject"));
  final TableName tn = helper.writeUntilViolation(SpaceViolationPolicy.DISABLE);
  final Admin admin = TEST_UTIL.getAdmin();
  // Disabling a table relies on some external action (over the other policies), so wait a bit
  // more than the other tests.
  for (int i = 0; i < NUM_RETRIES * 2; i++) {
    if (admin.isTableEnabled(tn)) {
      LOG.info(tn + " is still enabled, expecting it to be disabled. Will wait and re-check.");
      Thread.sleep(2000);
    }
  }
  assertFalse(tn + " is still enabled but it should be disabled", admin.isTableEnabled(tn));
  try {
    admin.enableTable(tn);
  } catch (AccessDeniedException e) {
    String exceptionContents = StringUtils.stringifyException(e);
    final String expectedText = "violated space quota";
    assertTrue(
        "Expected the exception to contain " + expectedText + ", but was: " + exceptionContents,
        exceptionContents.contains(expectedText));
  }
}
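Under SpaceViolationPolicy.DISABLE the master disables the table that violated its space quota, and the test expects any attempt to re-enable it while the quota is still violated to be rejected with an AccessDeniedException whose message names the violated quota.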
 
Example 5
Source File: TestSystemTableSnapshot.java    From hbase with Apache License 2.0
/**
 * Verify backup system table snapshot.
 *
 * @throws Exception if an operation on the table fails
 */
// @Test
public void _testBackupRestoreSystemTable() throws Exception {
  LOG.info("test snapshot system table");

  TableName backupSystem = BackupSystemTable.getTableName(conf1);

  Admin hba = TEST_UTIL.getAdmin();
  String snapshotName = "sysTable";
  hba.snapshot(snapshotName, backupSystem);

  hba.disableTable(backupSystem);
  hba.restoreSnapshot(snapshotName);
  hba.enableTable(backupSystem);
  hba.close();
}
 
Example 6
Source File: HtableAlterMetadataCLI.java    From kylin with Apache License 2.0
private void alter() throws IOException {
    Connection conn = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl());
    Admin hbaseAdmin = null;

    try {
        hbaseAdmin = conn.getAdmin();
        HTableDescriptor table = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));

        hbaseAdmin.disableTable(table.getTableName());
        table.setValue(metadataKey, metadataValue);
        hbaseAdmin.modifyTable(table.getTableName(), table);
        hbaseAdmin.enableTable(table.getTableName());
    } finally {
        if (hbaseAdmin != null) {
            hbaseAdmin.close();
        }
    }
}
 
Example 7
Source File: TestBackupSystemTable.java    From hbase with Apache License 2.0
private void cleanBackupTable() throws IOException {
  Admin admin = UTIL.getAdmin();
  admin.disableTable(BackupSystemTable.getTableName(conf));
  admin.truncateTable(BackupSystemTable.getTableName(conf), true);
  if (admin.isTableDisabled(BackupSystemTable.getTableName(conf))) {
    admin.enableTable(BackupSystemTable.getTableName(conf));
  }
}
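truncateTable() requires a disabled table and recreates it, normally leaving it enabled afterwards; the isTableDisabled() check is therefore a defensive guard that re-enables the backup table only if the truncate left it disabled.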
 
Example 8
Source File: TestCoprocessorTableEndpoint.java    From hbase with Apache License 2.0
private static void updateTable(
    final TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor) throws Exception {
  Admin admin = TEST_UTIL.getAdmin();
  admin.disableTable(tableDescriptor.getTableName());
  admin.modifyTable(tableDescriptor);
  admin.enableTable(tableDescriptor.getTableName());
}
 
Example 9
Source File: EnrichmentCoprocessorIntegrationTest.java    From metron with Apache License 2.0
private static void addCoprocessor(TableName tableName) throws IOException {
  // https://hbase.apache.org/1.1/book.html#cp_loading
  Admin hbaseAdmin = testUtil.getConnection().getAdmin();
  hbaseAdmin.disableTable(tableName);
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  htd.addCoprocessor(EnrichmentCoprocessor.class.getCanonicalName());
  hbaseAdmin.modifyTable(tableName, htd);
  hbaseAdmin.enableTable(tableName);
}
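As the linked HBase book section describes, loading a coprocessor on an existing table goes through a disable/modify/enable cycle. modifyTable() replaces the table descriptor wholesale, which is why the snippet rebuilds the HTableDescriptor, including its column family, before adding the coprocessor class.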
 
Example 10
Source File: TestHBaseCommitTable.java    From phoenix-omid with Apache License 2.0
@BeforeMethod
public void setUp() throws Exception {
    Admin admin = testutil.getHBaseAdmin();

    if (!admin.tableExists(TableName.valueOf(TEST_TABLE))) {
        HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);

        HColumnDescriptor datafam = new HColumnDescriptor(commitTableFamily);
        datafam.setMaxVersions(Integer.MAX_VALUE);
        desc.addFamily(datafam);

        HColumnDescriptor lowWatermarkFam = new HColumnDescriptor(lowWatermarkFamily);
        lowWatermarkFam.setMaxVersions(Integer.MAX_VALUE);
        desc.addFamily(lowWatermarkFam);

        // Move to HBaseSims for 2.0 support
        // For 2.0, use TableDescriptorBuilder to build TableDescriptor
        admin.createTable(desc);
    }

    if (admin.isTableDisabled(TableName.valueOf(TEST_TABLE))) {
        admin.enableTable(TableName.valueOf(TEST_TABLE));
    }
    HTableDescriptor[] tables = admin.listTables();
    for (HTableDescriptor t : tables) {
        LOG.info(t.getNameAsString());
    }
}
 
Example 11
Source File: TestLoadAndSwitchEncodeOnDisk.java    From hbase with Apache License 2.0
@Override
@Test
public void loadTest() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();

  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();

  ColumnFamilyDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
  Table t = TEST_UTIL.getConnection().getTable(TABLE);
  assertAllOnLine(t);

  admin.disableTable(TABLE);
  admin.modifyColumnFamily(TABLE, hcd);

  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);

  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");

  // The table may not have all regions on line yet.  Assert online before
  // moving to major compact.
  assertAllOnLine(t);

  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE);
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }

  System.err.println("\nDone with the test, shutting down the cluster\n");
}
 
Example 12
Source File: DeployCoprocessorCLI.java    From kylin with Apache License 2.0
public static boolean resetCoprocessor(String tableName, Admin hbaseAdmin, Path hdfsCoprocessorJar)
        throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));

    // When the table has migrated from the dev env to the test (prod) env,
    // the dev server should not reset the coprocessor of the table.
    String host = desc.getValue(IRealizationConstants.HTableTag);
    if (!host.equalsIgnoreCase(kylinConfig.getMetadataUrlPrefix())) {
        logger.warn("This server doesn't own this table: " + tableName);
        return false;
    }

    logger.info("reset coprocessor on " + tableName);

    logger.info("Disable " + tableName);
    if (hbaseAdmin.isTableEnabled(TableName.valueOf(tableName))) {
        hbaseAdmin.disableTable(TableName.valueOf(tableName));
    }

    while (desc.hasCoprocessor(CubeObserverClassOld2)) {
        desc.removeCoprocessor(CubeObserverClassOld2);
    }
    while (desc.hasCoprocessor(CubeEndpointClass)) {
        desc.removeCoprocessor(CubeEndpointClass);
    }
    while (desc.hasCoprocessor(IIEndpointClass)) {
        desc.removeCoprocessor(IIEndpointClass);
    }
    // remove legacy coprocessor from v1.x
    while (desc.hasCoprocessor(CubeObserverClassOld)) {
        desc.removeCoprocessor(CubeObserverClassOld);
    }
    while (desc.hasCoprocessor(IIEndpointClassOld)) {
        desc.removeCoprocessor(IIEndpointClassOld);
    }
    addCoprocessorOnHTable(desc, hdfsCoprocessorJar);

    // update commit tags
    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        desc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    hbaseAdmin.modifyTable(TableName.valueOf(tableName), desc);

    logger.info("Enable " + tableName);
    hbaseAdmin.enableTable(TableName.valueOf(tableName));

    return true;
}
 
Example 13
Source File: MutableIndexReplicationIT.java    From phoenix with Apache License 2.0
@Test
public void testReplicationWithMutableIndexes() throws Exception {
    Connection conn = getConnection();

    //create the primary and index tables
    conn.createStatement().execute(
            "CREATE TABLE " + DATA_TABLE_FULL_NAME
                    + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    conn.createStatement().execute(
            "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME
                    + " (v1)");

    // make sure that the tables are empty, but reachable
    String query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
    ResultSet rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    //make sure there is no data in the table
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    // make sure the data tables are created on the remote cluster
    Admin admin = utility1.getAdmin();
    Admin admin2 = utility2.getAdmin();

    List<String> dataTables = new ArrayList<String>();
    dataTables.add(DATA_TABLE_FULL_NAME);
    dataTables.add(INDEX_TABLE_FULL_NAME);
    for (String tableName : dataTables) {
        TableDescriptor desc = admin.getDescriptor(TableName.valueOf(tableName));

        //create it as-is on the remote cluster
        admin2.createTable(desc);

        LOGGER.info("Enabling replication on source table: "+tableName);
        ColumnFamilyDescriptor[] cols = desc.getColumnFamilies();
        assertEquals(1, cols.length);
        // add the replication scope to the column
        ColumnFamilyDescriptor col = ColumnFamilyDescriptorBuilder.newBuilder(cols[0].getName()).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build();
        desc=TableDescriptorBuilder.newBuilder(desc).removeColumnFamily(cols[0].getName()).addColumnFamily(col).build();
        //disable/modify/enable table so it has replication enabled
        admin.disableTable(desc.getTableName());
        admin.modifyTable(desc);
        admin.enableTable(desc.getTableName());
        LOGGER.info("Replication enabled on source table: "+tableName);
    }


    // load some data into the source cluster table
    PreparedStatement stmt =
            conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a"); // k
    stmt.setString(2, "x"); // v1 <- has index
    stmt.setString(3, "1"); // v2
    stmt.execute();
    conn.commit();

    // make sure the index is working as expected
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals("x", rs.getString(1));
    assertFalse(rs.next());
    conn.close();

    /*
     Validate that we have replicated the rows to the remote cluster
    */

    // other table can't be reached through Phoenix right now - would need to change how we
    // lookup tables. For right now, we just go through an HTable
    LOGGER.info("Looking up tables in replication target");
    TableName[] tables = admin2.listTableNames();
    org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(utility2.getConfiguration());
    Table remoteTable = hbaseConn.getTable(tables[0]);
    for (int i = 0; i < REPLICATION_RETRIES; i++) {
        if (i >= REPLICATION_RETRIES - 1) {
            fail("Waited too much time for put replication on table " + remoteTable
                    .getDescriptor().getTableName());
        }
        if (ensureAnyRows(remoteTable)) {
            break;
        }
        LOGGER.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
                + " for edits to get replicated");
        Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
    }
    remoteTable.close();
}
 
Example 14
Source File: TestRestoreSnapshotProcedure.java    From hbase with Apache License 2.0
private void setupSnapshotAndUpdateTable() throws Exception {
  long tid = System.currentTimeMillis();
  final String snapshotName = "snapshot-" + tid;
  Admin admin = UTIL.getAdmin();
  // create Table
  SnapshotTestingUtils.createTable(UTIL, snapshotTableName, getNumReplicas(), CF1, CF2);
  // Load data
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF1, CF1);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF2, CF2);
  SnapshotTestingUtils.verifyRowCount(UTIL, snapshotTableName, rowCountCF1 + rowCountCF2);

  snapshotHTD = admin.getDescriptor(snapshotTableName);

  admin.disableTable(snapshotTableName);
  // take a snapshot
  admin.snapshot(snapshotName, snapshotTableName);

  List<SnapshotDescription> snapshotList = admin.listSnapshots();
  snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0));

  // modify the table
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor columnFamilyDescriptor3 =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(CF3);
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor columnFamilyDescriptor4 =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(CF4);
  admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor3);
  admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor4);
  admin.deleteColumnFamily(snapshotTableName, CF2);
  // enable table and insert data
  admin.enableTable(snapshotTableName);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF3, CF3);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF4, CF4);
  SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF1addition, CF1);
  HTableDescriptor currentHTD = new HTableDescriptor(admin.getDescriptor(snapshotTableName));
  assertTrue(currentHTD.hasFamily(CF1));
  assertFalse(currentHTD.hasFamily(CF2));
  assertTrue(currentHTD.hasFamily(CF3));
  assertTrue(currentHTD.hasFamily(CF4));
  assertNotEquals(currentHTD.getFamiliesKeys().size(), snapshotHTD.getColumnFamilies().length);
  SnapshotTestingUtils.verifyRowCount(
    UTIL, snapshotTableName, rowCountCF1 + rowCountCF3 + rowCountCF4 + rowCountCF1addition);
  admin.disableTable(snapshotTableName);
}
 
Example 15
Source File: TestMasterRestartAfterDisablingTable.java    From hbase with Apache License 2.0
@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch()
    throws Exception {
  final int NUM_MASTERS = 2;
  final int NUM_REGIONS_TO_CREATE = 4;

  // Start the cluster
  log("Starting cluster");
  Configuration conf = HBaseConfiguration.create();
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(NUM_MASTERS).build();
  TEST_UTIL.startMiniCluster(option);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();

  // Create a table with regions
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[] family = Bytes.toBytes("family");
  log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
  Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
  int numRegions = -1;
  try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    numRegions = r.getStartKeys().length;
  }
  numRegions += 1; // catalogs
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Disabling table\n");
  TEST_UTIL.getAdmin().disableTable(tableName);

  NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals("The number of regions for the table tableRestart should be 0 and only" +
    "the catalog table should be present.", 1, regions.size());

  List<MasterThread> masterThreads = cluster.getMasterThreads();
  MasterThread activeMaster = null;
  if (masterThreads.get(0).getMaster().isActiveMaster()) {
    activeMaster = masterThreads.get(0);
  } else {
    activeMaster = masterThreads.get(1);
  }
  activeMaster.getMaster().stop(
      "stopping the active master so that the backup can become active");
  cluster.hbaseCluster.waitOnMaster(activeMaster);
  cluster.waitForActiveAndReadyMaster();

  assertTrue("The table should not be in enabled state",
      cluster.getMaster().getTableStateManager().isTableState(
      TableName.valueOf(name.getMethodName()), TableState.State.DISABLED,
      TableState.State.DISABLING));
  log("Enabling table\n");
  // Need a new Admin, the previous one is on the old master
  Admin admin = TEST_UTIL.getAdmin();
  admin.enableTable(tableName);
  admin.close();
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals("The assigned regions were not onlined after master" +
    " switch except for the catalog table.", 5, regions.size());
  assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager()
    .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
  ht.close();
  TEST_UTIL.shutdownMiniCluster();
}
 
Example 16
Source File: IntegrationTestDDLMasterFailover.java    From hbase with Apache License 2.0
@Override
void perform() throws IOException {

  TableDescriptor selected = selectTable(disabledTables);
  if (selected == null) {
    return;
  }

  Admin admin = connection.getAdmin();
  try {
    TableName tableName = selected.getTableName();
    LOG.info("Enabling table :" + selected);
    admin.enableTable(tableName);
    Assert.assertTrue("Table: " + selected + " was not enabled",
        admin.isTableEnabled(tableName));
    TableDescriptor freshTableDesc = admin.getDescriptor(tableName);
    Assert.assertTrue(
      "After enable, Table: " + tableName + " in not enabled", admin.isTableEnabled(tableName));
    enabledTables.put(tableName, freshTableDesc);
    LOG.info("Enabled table :" + freshTableDesc);
  } catch (Exception e){
    LOG.warn("Caught exception in action: " + this.getClass());
    // TODO workaround
    // loose restriction for TableNotDisabledException/TableNotEnabledException thrown in sync
    // operations 1) when enable/disable starts, the table state is changed to
    // ENABLING/DISABLING (ZK node in 1.x), which will be further changed to ENABLED/DISABLED
    // once the operation completes 2) if master failover happens in the middle of the
    // enable/disable operation, the new master will try to recover the tables in
    // ENABLING/DISABLING state, as programmed in
    // AssignmentManager#recoverTableInEnablingState() and
    // AssignmentManager#recoverTableInDisablingState()
    // 3) after the new master initialization completes, the procedure tries to re-do the
    // enable/disable operation, which was already done. Ignore those exceptions before
    // change of behaviors of AssignmentManager in presence of PV2
    if (e instanceof TableNotDisabledException) {
      LOG.warn("Caught TableNotDisabledException in action: " + this.getClass());
      e.printStackTrace();
    } else {
      throw e;
    }
  } finally {
    admin.close();
  }
}
 
Example 17
Source File: QueryDatabaseMetaDataIT.java    From phoenix with Apache License 2.0
@Test
public void testCreateOnExistingTable() throws Exception {
    try (PhoenixConnection pconn =
            DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        String tableName = generateUniqueName(); // MDTEST_NAME
        String schemaName = ""; // MDTEST_SCHEMA_NAME
        byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
        byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
        byte[] cfC = Bytes.toBytes("c");
        byte[][] familyNames = new byte[][] { cfB, cfC };
        byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
        Admin admin = pconn.getQueryServices().getAdmin();
        try {
            admin.disableTable(TableName.valueOf(htableName));
            admin.deleteTable(TableName.valueOf(htableName));
            admin.enableTable(TableName.valueOf(htableName));
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
        }

        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
        for (byte[] familyName : familyNames) {
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
        }
        admin.createTable(builder.build());
        createMDTestTable(pconn, tableName,
            "a." + ColumnFamilyDescriptorBuilder.BLOCKSIZE+ "=" + 50000);

        TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf(htableName));
        assertEquals(3, descriptor.getColumnFamilies().length);
        ColumnFamilyDescriptor cdA = descriptor.getColumnFamily(cfA);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
        assertNotEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdA.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // overridden using WITH
        assertEquals(1, cdA.getMaxVersions()); // overridden using WITH
        ColumnFamilyDescriptor cdB = descriptor.getColumnFamily(cfB);
        // Allow KEEP_DELETED_CELLS to be false for VIEW
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdB.getBlocksize());
        assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // should keep the original value
        // CF c should stay the same since it's not a Phoenix cf.
        ColumnFamilyDescriptor cdC = descriptor.getColumnFamily(cfC);
        assertNotNull("Column family not found", cdC);
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
        assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE, cdC.getBlocksize());
        assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
        assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
        assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
        admin.close();

        int rowCount = 5;
        String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
        PreparedStatement ps = pconn.prepareStatement(upsert);
        for (int i = 0; i < rowCount; i++) {
            ps.setString(1, Integer.toString(i));
            ps.setInt(2, i + 1);
            ps.setInt(3, i + 2);
            ps.execute();
        }
        pconn.commit();
        String query = "SELECT count(1) FROM " + tableName;
        ResultSet rs = pconn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals(rowCount, rs.getLong(1));

        query = "SELECT id, col1,col2 FROM " + tableName;
        rs = pconn.createStatement().executeQuery(query);
        for (int i = 0; i < rowCount; i++) {
            assertTrue(rs.next());
            assertEquals(Integer.toString(i), rs.getString(1));
            assertEquals(i + 1, rs.getInt(2));
            assertEquals(i + 2, rs.getInt(3));
        }
        assertFalse(rs.next());
    }
}
 
Example 18
Source File: IntegrationTestTimeBoundedRequestsWithRegionReplicas.java    From hbase with Apache License 2.0
@Override
protected void runIngestTest(long defaultRunTime, long keysPerServerPerIter, int colsPerKey,
    int recordSize, int writeThreads, int readThreads) throws Exception {
  LOG.info("Cluster size:"+
    util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size());

  long start = System.currentTimeMillis();
  String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
  long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
  long startKey = 0;

  long numKeys = getNumKeys(keysPerServerPerIter);


  // write data once
  LOG.info("Writing some data to the table");
  writeData(colsPerKey, recordSize, writeThreads, startKey, numKeys);

  // flush the table
  LOG.info("Flushing the table");
  Admin admin = util.getAdmin();
  admin.flush(getTablename());

  // re-open the regions to make sure that the replicas are up to date
  long refreshTime = conf.getLong(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 0);
  if (refreshTime > 0 && refreshTime <= 10000) {
    LOG.info("Sleeping " + refreshTime + "ms to ensure that the data is replicated");
    Threads.sleep(refreshTime*3);
  } else {
    LOG.info("Reopening the table");
    admin.disableTable(getTablename());
    admin.enableTable(getTablename());
  }

  // We should only start the ChaosMonkey after the readers are started and have cached
  // all of the region locations. Because the meta is not replicated, the timebounded reads
  // will timeout if meta server is killed.
  // We will start the chaos monkey after 1 minute, and since the readers are reading random
  // keys, it should be enough to cache every region entry.
  long chaosMonkeyDelay = conf.getLong(String.format("%s.%s", TEST_NAME, CHAOS_MONKEY_DELAY_KEY)
    , DEFAUL_CHAOS_MONKEY_DELAY);
  ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
  LOG.info(String.format("ChaosMonkey delay is : %d seconds. Will start %s " +
      "ChaosMonkey after delay", chaosMonkeyDelay / 1000, monkeyToUse));
  ScheduledFuture<?> result = executorService.schedule(new Runnable() {
    @Override
    public void run() {
      try {
        LOG.info("Starting ChaosMonkey");
        monkey.start();
        monkey.waitForStop();
      } catch (Exception e) {
        LOG.warn(StringUtils.stringifyException(e));
      }

    }
  }, chaosMonkeyDelay, TimeUnit.MILLISECONDS);

  // set the intended run time for the reader. The reader will do read requests
  // to random keys for this amount of time.
  long remainingTime = runtime - (System.currentTimeMillis() - start);
  if (remainingTime <= 0) {
    LOG.error("The amount of time left for the test to perform random reads is "
        + "non-positive. Increase the test execution time via "
        + String.format(RUN_TIME_KEY,
              IntegrationTestTimeBoundedRequestsWithRegionReplicas.class.getSimpleName())
        + " or reduce the amount of data written per server via "
        + IntegrationTestTimeBoundedRequestsWithRegionReplicas.class.getSimpleName()
        + "." + IntegrationTestIngest.NUM_KEYS_PER_SERVER_KEY);
    throw new IllegalArgumentException("No time remains to execute random reads");
  }
  LOG.info("Reading random keys from the table for " + remainingTime/60000 + " min");
  this.conf.setLong(
    String.format(RUN_TIME_KEY, TimeBoundedMultiThreadedReader.class.getSimpleName())
    , remainingTime); // load tool shares the same conf

  // now start the readers which will run for configured run time
  try {
    int ret = loadTool.run(getArgsForLoadTestTool("-read", String.format("100:%d", readThreads)
      , startKey, numKeys));
    if (0 != ret) {
      String errorMsg = "Verification failed with error code " + ret;
      LOG.error(errorMsg);
      Assert.fail(errorMsg);
    }
  } finally {
    if (result != null) result.cancel(false);
    monkey.stop("Stopping the test");
    monkey.waitForStop();
    executorService.shutdown();
  }
}
 
Example 19
Source File: DeployCoprocessorCLI.java    From kylin-on-parquet-v2 with Apache License 2.0
public static boolean resetCoprocessor(String tableName, Admin hbaseAdmin, Path hdfsCoprocessorJar)
        throws IOException {
    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));

    // When the table has migrated from the dev env to the test (prod) env,
    // the dev server should not reset the coprocessor of the table.
    String host = desc.getValue(IRealizationConstants.HTableTag);
    if (!host.equalsIgnoreCase(kylinConfig.getMetadataUrlPrefix())) {
        logger.warn("This server doesn't own this table: " + tableName);
        return false;
    }

    logger.info("reset coprocessor on " + tableName);

    logger.info("Disable " + tableName);
    if (hbaseAdmin.isTableEnabled(TableName.valueOf(tableName))) {
        hbaseAdmin.disableTable(TableName.valueOf(tableName));
    }

    while (desc.hasCoprocessor(CubeObserverClassOld2)) {
        desc.removeCoprocessor(CubeObserverClassOld2);
    }
    while (desc.hasCoprocessor(CubeEndpointClass)) {
        desc.removeCoprocessor(CubeEndpointClass);
    }
    while (desc.hasCoprocessor(IIEndpointClass)) {
        desc.removeCoprocessor(IIEndpointClass);
    }
    // remove legacy coprocessor from v1.x
    while (desc.hasCoprocessor(CubeObserverClassOld)) {
        desc.removeCoprocessor(CubeObserverClassOld);
    }
    while (desc.hasCoprocessor(IIEndpointClassOld)) {
        desc.removeCoprocessor(IIEndpointClassOld);
    }
    addCoprocessorOnHTable(desc, hdfsCoprocessorJar);

    // update commit tags
    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        desc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    hbaseAdmin.modifyTable(TableName.valueOf(tableName), desc);

    logger.info("Enable " + tableName);
    hbaseAdmin.enableTable(TableName.valueOf(tableName));

    return true;
}
 
Example 20
Source File: ParameterizedTransactionIT.java    From phoenix with Apache License 2.0
@Test
public void testNonTxToTxTableFailure() throws Exception {
    if (tableDDLOptions.contains("COLUMN_ENCODED_BYTES")) {
        // no need to test this with all variations of column encoding
        return;
    }

    String nonTxTableName = generateUniqueName();

    Connection conn = DriverManager.getConnection(getUrl());
    // Put table in SYSTEM schema to prevent attempts to update the cache after we disable SYSTEM.CATALOG
    conn.createStatement().execute("CREATE TABLE \"SYSTEM\"." + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptionsWithoutProvider);
    conn.createStatement().execute("UPSERT INTO \"SYSTEM\"." + nonTxTableName + " VALUES (1)");
    conn.commit();
    // Reset empty column value to an empty value like it is pre-transactions
    Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
    Put put = new Put(PInteger.INSTANCE.toBytes(1));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
    htable.put(put);
    
    Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    admin.disableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
    try {
        // This will succeed initially in updating the HBase metadata, but then will fail when
        // the SYSTEM.CATALOG table is attempted to be updated, exercising the code to restore
        // the coprocessors back to the non transactional ones.
        conn.createStatement().execute("ALTER TABLE \"SYSTEM\"." + nonTxTableName + " SET TRANSACTIONAL=true,TRANSACTION_PROVIDER='" + transactionProvider + "'");
        fail();
    } catch (SQLException e) {
        if (transactionProvider.isUnsupported(Feature.ALTER_NONTX_TO_TX)) {
            assertEquals(SQLExceptionCode.CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL.getErrorCode(), e.getErrorCode());
        } else {
            assertTrue(e.getMessage().contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " is disabled"));
        }
    } finally {
        admin.enableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
        admin.close();
    }
    
    ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM \"SYSTEM\"." + nonTxTableName + " WHERE v IS NULL");
    assertTrue(rs.next());
    assertEquals(1,rs.getInt(1));
    assertFalse(rs.next());
    
    htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
    Class<? extends RegionObserver> clazz = transactionProvider.getCoprocessor();
    assertFalse(htable.getDescriptor().getCoprocessors().contains(clazz.getName()));
    assertEquals(1,conn.unwrap(PhoenixConnection.class).getQueryServices().
            getTableDescriptor(Bytes.toBytes("SYSTEM." + nonTxTableName)).
            getColumnFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
}