Java Code Examples for org.apache.hadoop.hbase.HTableDescriptor#addCoprocessor()

The following examples show how to use org.apache.hadoop.hbase.HTableDescriptor#addCoprocessor(). They are drawn from open source projects; the originating project and source file are noted above each example.
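Before the project-specific examples, here is a minimal, hypothetical sketch of the two common call forms; the table name, column family, and coprocessor class names below are placeholders rather than code from any of the projects listed.

private static HTableDescriptor buildDescriptorWithCoprocessors() throws IOException {
    // Placeholder table, family, and coprocessor class names, for illustration only.
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example_table"));
    desc.addFamily(new HColumnDescriptor("cf"));
    // Simplest form: fully qualified class name; the class must already be on the region server classpath.
    desc.addCoprocessor("com.example.MyRegionObserver");
    // Full form: class name, optional jar path (null = use the server classpath), load priority,
    // and an optional Map of per-coprocessor configuration properties.
    desc.addCoprocessor("com.example.MyEndpoint", null, Coprocessor.PRIORITY_USER, null);
    return desc;
}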
Example 1
Source File: TestSnapshotFilter.java    From phoenix-omid with Apache License 2.0
private void createTableIfNotExists(String tableName, byte[]... families) throws IOException {
    if (!admin.tableExists(TableName.valueOf(tableName))) {
        LOG.info("Creating {} table...", tableName);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        for (byte[] family : families) {
            HColumnDescriptor datafam = new HColumnDescriptor(family);
            datafam.setMaxVersions(MAX_VERSIONS);
            desc.addFamily(datafam);
        }

        int priority = Coprocessor.PRIORITY_HIGHEST;

        desc.addCoprocessor(OmidSnapshotFilter.class.getName(), null, ++priority, null);
        desc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation", null, ++priority, null);

        admin.createTable(desc);
        try {
            hbaseTestUtil.waitTableAvailable(TableName.valueOf(tableName),5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

}
 
Example 2
Source File: AbstractHBaseTableTest.java    From phoenix-tephra with Apache License 2.0
protected static HTable createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    desc.addFamily(columnDesc);
  }
  if (existingData) {
    desc.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Divide individually to prevent any overflow
  int priority = Coprocessor.PRIORITY_USER;
  // order in list is the same order that coprocessors will be invoked
  for (String coprocessor : coprocessors) {
    desc.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return new HTable(testUtil.getConfiguration(), tableName);
}
 
Example 3
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1});
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
                     new LocalRegionServerServices(conf, ServerName.valueOf(
                       InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example 4
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(conf, null, tableName + ".hlog");
  WAL hLog = walFactory.getWAL(new byte[]{1}, null);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd,
      new LocalRegionServerServices(conf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
 
Example 5
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor cfd = new HColumnDescriptor(family);
  if (ttl > 0) {
    cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  cfd.setMaxVersions(10);
  htd.addFamily(cfd);
  htd.addCoprocessor(TransactionProcessor.class.getName());
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
  Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
  FileSystem fs = FileSystem.get(conf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}
 
Example 6
Source File: AbstractHBaseTableTest.java    From phoenix-tephra with Apache License 2.0
protected static HTable createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    desc.addFamily(columnDesc);
  }
  if (existingData) {
    desc.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Divide individually to prevent any overflow
  int priority = Coprocessor.PRIORITY_USER;
  // order in list is the same order that coprocessors will be invoked
  for (String coprocessor : coprocessors) {
    desc.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return new HTable(testUtil.getConfiguration(), tableName);
}
 
Example 7
Source File: AbstractHBaseTableTest.java    From phoenix-tephra with Apache License 2.0
protected static Table createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    desc.addFamily(columnDesc);
  }
  if (existingData) {
    desc.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Divide individually to prevent any overflow
  int priority = Coprocessor.PRIORITY_USER;
  // order in list is the same order that coprocessors will be invoked
  for (String coprocessor : coprocessors) {
    desc.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return testUtil.getConnection().getTable(TableName.valueOf(tableName));
}
 
Example 8
Source File: TestReplicaWithCluster.java    From hbase with Apache License 2.0
@Test
public void testReplicaGetWithPrimaryDown() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(TableName.valueOf("testCreateDeleteTable"),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(RegionServerStoppedCopro.class.getName());
  try {
    Table table = HTU.createTable(hdt, new byte[][] { f }, null);

    Put p = new Put(row);
    p.addColumn(f, row, row);
    table.put(p);

    // Flush so it can be picked by the replica refresher thread
    HTU.flush(table.getName());

    // Sleep for some time until data is picked up by replicas
    try {
      Thread.sleep(2 * REFRESH_PERIOD);
    } catch (InterruptedException e1) {
      LOG.error(e1.toString(), e1);
    }

    // But if we ask for stale we will get it
    Get g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    Result r = table.get(g);
    Assert.assertTrue(r.isStale());
  } finally {
    HTU.getAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
}
 
Example 9
Source File: BalanceBooks.java    From phoenix-tephra with Apache License 2.0
protected void createTableIfNotExists(Configuration conf, byte[] tableName, byte[][] columnFamilies)
    throws IOException {
  try (HBaseAdmin admin = new HBaseAdmin(conf)) {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : columnFamilies) {
      HColumnDescriptor columnDesc = new HColumnDescriptor(family);
      columnDesc.setMaxVersions(Integer.MAX_VALUE);
      desc.addFamily(columnDesc);
    }
    desc.addCoprocessor(TransactionProcessor.class.getName());
    admin.createTable(desc);
  }
}
 
Example 10
Source File: BalanceBooks.java    From phoenix-tephra with Apache License 2.0
protected void createTableIfNotExists(Configuration conf, byte[] tableName, byte[][] columnFamilies)
    throws IOException {
  try (Admin admin = this.conn.getAdmin()) {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    for (byte[] family : columnFamilies) {
      HColumnDescriptor columnDesc = new HColumnDescriptor(family);
      columnDesc.setMaxVersions(Integer.MAX_VALUE);
      desc.addFamily(columnDesc);
    }
    desc.addCoprocessor(TransactionProcessor.class.getName());
    admin.createTable(desc);
  }
}
 
Example 11
Source File: TestReplicasClient.java    From hbase with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  // enable store file refreshing
  HTU.getConfiguration().setInt(
      StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD);
  HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
  HTU.getConfiguration().setBoolean(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, true);
  StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(1).
      numAlwaysStandByMasters(1).numMasters(1).build();
  HTU.startMiniCluster(option);

  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(
    TableName.valueOf(TestReplicasClient.class.getSimpleName()),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  HTU.createTable(hdt, new byte[][]{f}, null);
  TABLE_NAME = hdt.getTableName();
  try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) {
    hriPrimary = locator.getRegionLocation(row, false).getRegion();
  }

  // mock a secondary region info to open
  hriSecondary = RegionReplicaUtil.getRegionInfoForReplica(hriPrimary, 1);

  // No master
  LOG.info("Master is going to be stopped");
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
  Configuration c = new Configuration(HTU.getConfiguration());
  c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  LOG.info("Master has stopped");
}
 
Example 12
Source File: EnrichmentCoprocessorIntegrationTest.java    From metron with Apache License 2.0
private static void addCoprocessor(TableName tableName) throws IOException {
  // https://hbase.apache.org/1.1/book.html#cp_loading
  Admin hbaseAdmin = testUtil.getConnection().getAdmin();
  hbaseAdmin.disableTable(tableName);
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  htd.addCoprocessor(EnrichmentCoprocessor.class.getCanonicalName());
  hbaseAdmin.modifyTable(tableName, htd);
  hbaseAdmin.enableTable(tableName);
}
 
Example 13
Source File: TestWithHBaseCoprocessor.java    From eagle with Apache License 2.0
@BeforeClass
public static void setUpHBase() throws IOException {
    System.setProperty("config.resource", "/application-co.conf");
    Configuration conf = HBaseConfiguration.create();
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AggregateProtocolEndPoint.class.getName());
    conf.set("zookeeper.znode.parent", getZkZnodeParent());
    conf.setInt("hbase.master.info.port", -1);//avoid port clobbering
    conf.setInt("hbase.regionserver.info.port", -1);//avoid port clobbering

    int attempts = 0;
    hbase = new HBaseTestingUtility(conf);
    boolean successToStart = false;
    while (attempts < 3) {
        try {
            attempts++;
            hbase.startMiniCluster();
            successToStart = true;
            break; // stop retrying once the mini cluster has started
        } catch (Exception e) {
            LOG.error("Failed to start mini cluster (attempt {}): {}", attempts, e.getMessage(), e);
            try {
                hbase.shutdownMiniCluster();
            } catch (Exception e1) {
                LOG.warn(e1.getMessage(), e1);
            }
        }
    }

    Assert.assertTrue("Failed to start mini cluster in " + attempts + " attempts", successToStart);

    HTable table = hbase.createTable("unittest", "f");
    HTableDescriptor descriptor = new HTableDescriptor(table.getTableDescriptor());
    descriptor.addCoprocessor(AggregateProtocolEndPoint.class.getName());
    hbase.getHBaseAdmin().modifyTable("unittest", descriptor);

    System.setProperty("storage.hbase.autoCreateTable","false");
    System.setProperty("storage.hbase.coprocessorEnabled", String.valueOf(true));
    System.setProperty("storage.hbase.zookeeperZnodeParent", getZkZnodeParent());
    System.setProperty("storage.hbase.zookeeperPropertyClientPort", String.valueOf(hbase.getZkCluster().getClientPort()));
}
 
Example 14
Source File: TestCoprocessorWhitelistMasterObserver.java    From hbase with Apache License 2.0
/**
 * Test a table modification adding a coprocessor path
 * which is whitelisted. The coprocessor should be added to
 * the table descriptor successfully.
 * @param whitelistedPaths A String array of paths to add in
 *         for the whitelisting configuration
 * @param coprocessorPath A String to use as the
 *         path for a mock coprocessor
 */
private static void negativeTestCase(String[] whitelistedPaths,
    String coprocessorPath) throws Exception {
  Configuration conf = UTIL.getConfiguration();
  conf.setInt("hbase.client.retries.number", 5);
  // load coprocessor under test
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      CoprocessorWhitelistMasterObserver.class.getName());
  // set retries low to raise exception quickly
  // set a coprocessor whitelist path for test
  conf.setStrings(
      CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY,
      whitelistedPaths);
  UTIL.startMiniCluster();
  UTIL.createTable(TEST_TABLE, new byte[][] { TEST_FAMILY });
  UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();
  // disable table so we do not actually try loading a non-existent
  // coprocessor file
  admin.disableTable(TEST_TABLE);
  Table t = connection.getTable(TEST_TABLE);
  HTableDescriptor htd = new HTableDescriptor(t.getDescriptor());
  htd.addCoprocessor("net.clayb.hbase.coprocessor.Whitelisted",
    new Path(coprocessorPath),
    Coprocessor.PRIORITY_USER, null);
  LOG.info("Modifying Table");
  admin.modifyTable(htd);
  assertEquals(1, t.getDescriptor().getCoprocessorDescriptors().size());
  LOG.info("Done Modifying Table");
}
 
Example 15
Source File: Indexer.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Enable indexing on the given table
 * @param desc {@link HTableDescriptor} for the table on which indexing should be enabled
 * @param builder class to use when building the index for this table
 * @param properties map of custom configuration options to make available to your
 *          {@link IndexBuilder} on the server-side
 * @throws IOException the Indexer coprocessor cannot be added
 */
public static void enableIndexing(HTableDescriptor desc, Class<? extends IndexBuilder> builder,
    Map<String, String> properties) throws IOException {
  if (properties == null) {
    properties = new HashMap<String, String>();
  }
  properties.put(Indexer.INDEX_BUILDER_CONF_KEY, builder.getName());
  desc.addCoprocessor(Indexer.class.getName(), null, Coprocessor.PRIORITY_USER, properties);
}
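As a hedged illustration of how this helper might be called (the IndexBuilder subclass, table layout, and helper method below are placeholders, not part of the Phoenix source above):

private static HTableDescriptor descriptorWithIndexing() throws IOException {
  // MyIndexBuilder stands in for an application-specific IndexBuilder implementation.
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("indexed_table"));
  desc.addFamily(new HColumnDescriptor("fam"));
  Map<String, String> properties = new HashMap<String, String>();
  Indexer.enableIndexing(desc, MyIndexBuilder.class, properties);
  return desc; // pass to the admin's createTable(...) to create the indexed table
}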
 
Example 16
Source File: HBaseSITestEnv.java    From spliceengine with GNU Affero General Public License v3.0
private static HTableDescriptor generateTransactionTable() throws IOException{
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("splice",HConfiguration.TRANSACTION_TABLE));
    desc.addCoprocessor(TxnLifecycleEndpoint.class.getName());

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(SIConstants.DEFAULT_FAMILY_BYTES);
    columnDescriptor.setMaxVersions(5);
    columnDescriptor.setCompressionType(Compression.Algorithm.NONE);
    columnDescriptor.setInMemory(true);
    columnDescriptor.setBlockCacheEnabled(true);
    columnDescriptor.setBloomFilterType(BloomType.ROWCOL);
    desc.addFamily(columnDescriptor);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes(SIConstants.SI_PERMISSION_FAMILY)));
    return desc;
}
 
Example 17
Source File: CubeHTableUtil.java    From kylin-on-parquet-v2 with Apache License 2.0
public static void createHTable(CubeSegment cubeSegment, byte[][] splitKeys) throws IOException {
    String tableName = cubeSegment.getStorageLocationIdentifier();
    CubeInstance cubeInstance = cubeSegment.getCubeInstance();
    CubeDesc cubeDesc = cubeInstance.getDescriptor();
    KylinConfig kylinConfig = cubeDesc.getConfig();

    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(cubeSegment.getStorageLocationIdentifier()));
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
    tableDesc.setValue(IRealizationConstants.HTableCreationTime, String.valueOf(System.currentTimeMillis()));

    if (!StringUtils.isEmpty(kylinConfig.getKylinOwner())) {
        //HTableOwner is the team that provides kylin service
        tableDesc.setValue(IRealizationConstants.HTableOwner, kylinConfig.getKylinOwner());
    }

    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        tableDesc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    //HTableUser is the cube owner, which will be the "user"
    tableDesc.setValue(IRealizationConstants.HTableUser, cubeInstance.getOwner());

    tableDesc.setValue(IRealizationConstants.HTableSegmentTag, cubeSegment.toString());

    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin admin = conn.getAdmin();

    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = createColumnFamily(kylinConfig, cfDesc.getName(), cfDesc.isMemoryHungry());
            tableDesc.addFamily(cf);
        }

        if (admin.tableExists(TableName.valueOf(tableName))) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        admin.createTable(tableDesc, splitKeys);
        Preconditions.checkArgument(admin.isTableAvailable(TableName.valueOf(tableName)), "table " + tableName + " created, but is not available due to some reasons");
        logger.info("create hbase table " + tableName + " done.");
    } finally {
        IOUtils.closeQuietly(admin);
    }

}
 
Example 18
Source File: IndexLoadBalancerIT.java    From phoenix with Apache License 2.0
@Test(timeout = 180000)
public void testColocationAfterSplit() throws Exception {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    // Table names for the user table and its index table
    TableName tableName = TableName.valueOf("testSplitHooksBeforeAndAfterPONR_1");
    TableName indexTableName = TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2");
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addCoprocessor(MockedRegionObserver.class.getName());
    htd.addFamily(new HColumnDescriptor("cf"));
    char c = 'A';
    byte[][] split = new byte[20][];
    for (int i = 0; i < 20; i++) {
        byte[] b = { (byte) c };
        split[i] = b;
        c++;
    }
    admin.createTable(htd, split);
    HTableDescriptor iHtd = new HTableDescriptor(indexTableName);
    iHtd.addFamily(new HColumnDescriptor("cf"));
    iHtd.setValue(IndexLoadBalancer.PARENT_TABLE_KEY, tableName.toBytes());
    admin.createTable(iHtd, split);

    // test put with the indexed column

    insertData(tableName);
    insertData(indexTableName);

    admin.split(tableName.getNameAsString(), "c");
    List<HRegionInfo> regionsOfUserTable =
            master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);

    while (regionsOfUserTable.size() != 22) {
        Thread.sleep(100);
        regionsOfUserTable =
                master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
    }

    List<HRegionInfo> regionsOfIndexTable =
            master.getAssignmentManager().getRegionStates().getRegionsOfTable(indexTableName);

    while (regionsOfIndexTable.size() != 22) {
        Thread.sleep(100);
        regionsOfIndexTable =
                master.getAssignmentManager().getRegionStates().getRegionsOfTable(
                    indexTableName);
    }
    boolean isRegionColocated =
            checkForColocation(master, tableName.getNameAsString(), indexTableName
                    .getNameAsString());
    assertTrue("User regions and index regions should colocate.", isRegionColocated);
}
 
Example 19
Source File: TestServerBusyException.java    From hbase with Apache License 2.0
@Test()
public void testServerBusyException() throws Exception {
  HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName()));
  hdt.addCoprocessor(SleepCoprocessor.class.getName());
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, c);

  TestGetThread tg1 =
      new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestGetThread tg2 =
      new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestGetThread tg3 =
      new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestGetThread tg4 =
      new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestGetThread tg5 =
      new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  tg1.start();
  tg2.start();
  tg3.start();
  tg4.start();
  tg5.start();
  tg1.join();
  tg2.join();
  tg3.join();
  tg4.join();
  tg5.join();
  assertEquals(2,
      tg1.getServerBusyException + tg2.getServerBusyException + tg3.getServerBusyException
          + tg4.getServerBusyException + tg5.getServerBusyException);

  // Put has its own logic in HTable, test Put alone. We use AsyncProcess for Put (use multi at
  // RPC level) and it wrap exceptions to RetriesExhaustedWithDetailsException.

  TestPutThread tp1 =
      new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestPutThread tp2 =
      new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestPutThread tp3 =
      new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestPutThread tp4 =
      new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  TestPutThread tp5 =
      new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
  tp1.start();
  tp2.start();
  tp3.start();
  tp4.start();
  tp5.start();
  tp1.join();
  tp2.join();
  tp3.join();
  tp4.join();
  tp5.join();
  assertEquals(2,
      tp1.getServerBusyException + tp2.getServerBusyException + tp3.getServerBusyException
          + tp4.getServerBusyException + tp5.getServerBusyException);
}
 
Example 20
Source File: IICreateHTableJob.java    From Kylin with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_II_NAME);
        options.addOption(OPTION_HTABLE_NAME);
        parseOptions(options, args);

        String tableName = getOptionValue(OPTION_HTABLE_NAME);
        String iiName = getOptionValue(OPTION_II_NAME);

        KylinConfig config = KylinConfig.getInstanceFromEnv();
        IIManager iiManager = IIManager.getInstance(config);
        IIInstance ii = iiManager.getII(iiName);
        int sharding = ii.getDescriptor().getSharding();

        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor cf = new HColumnDescriptor(IIDesc.HBASE_FAMILY);
        cf.setMaxVersions(1);
        //cf.setCompressionType(Algorithm.LZO);
        cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        tableDesc.addFamily(cf);
        tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());

        Configuration conf = HBaseConfiguration.create(getConf());
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        // drop the table first
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tableName)) {
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
        }

        // create table
        byte[][] splitKeys = getSplits(sharding);
        if (splitKeys.length == 0)
            splitKeys = null;
        admin.createTable(tableDesc, splitKeys);
        if (splitKeys != null) {
            for (int i = 0; i < splitKeys.length; i++) {
                System.out.println("split key " + i + ": " + BytesUtil.toHex(splitKeys[i]));
            }
        }
        System.out.println("create hbase table " + tableName + " done.");
        admin.close();

        return 0;
    } catch (Exception e) {
        printUsage(options);
        throw e;
    }
}