org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy Java Examples

The following examples show how to use org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy. Each example is taken from an open source project; the source file and project it comes from are noted above the code.
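Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern most of them share: creating a table whose split policy is set to DisabledRegionSplitPolicy so that regions are never split automatically. It uses the HBase 2.x TableDescriptorBuilder API that appears in several of the examples; the table and column family names are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;

public class DisabledSplitPolicyExample {
    public static void main(String[] args) throws Exception {
        TableName tableName = TableName.valueOf("example_table"); // illustrative name
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
            // Regions of this table will never be split automatically by the region server.
            .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(desc);
        }
    }
}

Older code bases (for example, the Kylin utilities below) achieve the same effect with tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName()), or by setting the HConstants.HBASE_REGION_SPLIT_POLICY_KEY table property.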
Example #1
Source File: CubeHTableUtil.java    From kylin-on-parquet-v2 with Apache License 2.0
/** Creates an HTable that has the same performance settings as a normal cube table, for benchmarking purposes. */
public static void createBenchmarkHTable(TableName tableName, String cfName) throws IOException {
    Admin admin = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl()).getAdmin();
    try {
        if (admin.tableExists(tableName)) {
            logger.info("disabling hbase table " + tableName);
            admin.disableTable(tableName);
            logger.info("deleting hbase table " + tableName);
            admin.deleteTable(tableName);
        }

        HTableDescriptor tableDesc = new HTableDescriptor(tableName);
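        // DisabledRegionSplitPolicy keeps this benchmark table in a single region by disabling automatic splits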
        tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());

        KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
        tableDesc.addFamily(createColumnFamily(kylinConfig, cfName, false));

        logger.info("creating hbase table " + tableName);
        admin.createTable(tableDesc, null);
        Preconditions.checkArgument(admin.isTableAvailable(tableName), "table " + tableName + " created, but is not available due to some reasons");
        logger.info("create hbase table " + tableName + " done.");
    } finally {
        IOUtils.closeQuietly(admin);
    }
}
 
Example #2
Source File: CubeHTableUtil.java    From kylin with Apache License 2.0
/** Creates an HTable that has the same performance settings as a normal cube table, for benchmarking purposes. */
public static void createBenchmarkHTable(TableName tableName, String cfName) throws IOException {
    Admin admin = HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl()).getAdmin();
    try {
        if (admin.tableExists(tableName)) {
            logger.info("disabling hbase table " + tableName);
            admin.disableTable(tableName);
            logger.info("deleting hbase table " + tableName);
            admin.deleteTable(tableName);
        }

        HTableDescriptor tableDesc = new HTableDescriptor(tableName);
        tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());

        KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
        tableDesc.addFamily(createColumnFamily(kylinConfig, cfName, false));

        logger.info("creating hbase table " + tableName);
        admin.createTable(tableDesc, null);
        Preconditions.checkArgument(admin.isTableAvailable(tableName), "table " + tableName + " created, but is not available due to some reasons");
        logger.info("create hbase table " + tableName + " done.");
    } finally {
        IOUtils.closeQuietly(admin);
    }
}
 
Example #3
Source File: VisibilityController.java    From hbase with Apache License 2.0
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getConnection(), LABELS_TABLE_NAME)) {
      TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
        new TableDescriptorBuilder.ModifyableTableDescriptor(LABELS_TABLE_NAME);
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(LABELS_TABLE_FAMILY);
      familyDescriptor.setBloomFilterType(BloomType.NONE);
      // We will cache all the labels, so the normal table block cache is not needed.
      familyDescriptor.setBlockCacheEnabled(false);
      tableDescriptor.setColumnFamily(familyDescriptor);
      // Keep the "labels" table to a single region; we do not expect many labels in
      // the system.
      tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) {
        admin.createTable(tableDescriptor);
      }
    }
  }
 
Example #4
Source File: TestAdmin1.java    From hbase with Apache License 2.0
@Test
public void testSplitShouldNotHappenIfSplitIsDisabledForTable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
    .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName())
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build();
  Table table = TEST_UTIL.createTable(htd, null);
  for (int i = 0; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row" + i));
    byte[] q1 = Bytes.toBytes("q1");
    byte[] v1 = Bytes.toBytes("v1");
    p.addColumn(Bytes.toBytes("f"), q1, v1);
    table.put(p);
  }
  ADMIN.flush(tableName);
  try {
    ADMIN.split(tableName, Bytes.toBytes("row5"));
    Threads.sleep(10000);
  } catch (Exception e) {
    // Nothing to do.
  }
  // Split should not happen.
  List<RegionInfo> allRegions =
    MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName, true);
  assertEquals(1, allRegions.size());
}
 
Example #5
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
private HStore prepareData() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  TimeOffsetEnvironmentEdge edge =
      (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      ThreadLocalRandom.current().nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
 
Example #6
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
@Test
public void testSanityCheckBlockingStoreFiles() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("Blocking file count 'hbase.hstore.blockingStoreFiles'");
  error.expectMessage("is below recommended minimum of 1000 for column family");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-BlockingStoreFiles");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setValue(HStore.BLOCKING_STOREFILES_KEY, "10")
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
  TEST_UTIL.getAdmin().createTable(desc);
}
 
Example #7
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
@Test
public void testSanityCheckTTL() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("Default TTL is not supported");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-TTL");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
  TEST_UTIL.getAdmin().createTable(desc);
}
 
Example #8
Source File: CreateTables.java    From hadoop-arch-book with Apache License 2.0
private static void createValidationRuleTable(HBaseAdmin admin) throws IOException {
  HTableDescriptor tableDescriptor = new HTableDescriptor(HBaseTableMetaModel.validationRulesTableName);

  HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(HBaseTableMetaModel.validationRulesColumnFamily);
  hColumnDescriptor.setMaxVersions(1);

  tableDescriptor.addFamily(hColumnDescriptor);
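  // Disable automatic region splits so the validation-rules table stays in a single region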
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());

  admin.createTable(tableDescriptor);
}
 
Example #9
Source File: ChangeSplitPolicyAction.java    From hbase with Apache License 2.0
public ChangeSplitPolicyAction(TableName tableName) {
  this.tableName = tableName;
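  // Candidate split policies for this chaos action to choose from (selection uses the Random below)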
  possiblePolicies = new String[] {
      IncreasingToUpperBoundRegionSplitPolicy.class.getName(),
      ConstantSizeRegionSplitPolicy.class.getName(),
      DisabledRegionSplitPolicy.class.getName()
  };
  this.random = new Random();
}
 
Example #10
Source File: StripeCompactionsPerformanceEvaluation.java    From hbase with Apache License 2.0
private TableDescriptorBuilder.ModifyableTableDescriptor createHtd(boolean isStripe)
    throws Exception {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(TABLE_NAME);
  ColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(COLUMN_FAMILY);
  tableDescriptor.setColumnFamily(familyDescriptor);
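  // Disable automatic region splits for the evaluation table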
  String noSplitsPolicy = DisabledRegionSplitPolicy.class.getName();
  tableDescriptor.setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, noSplitsPolicy);
  if (isStripe) {
    tableDescriptor.setValue(StoreEngine.STORE_ENGINE_CLASS_KEY,
      StripeStoreEngine.class.getName());
    if (initialStripeCount != null) {
      tableDescriptor.setValue(
          StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialStripeCount.toString());
      tableDescriptor.setValue(
          HStore.BLOCKING_STOREFILES_KEY, Long.toString(10 * initialStripeCount));
    } else {
      tableDescriptor.setValue(HStore.BLOCKING_STOREFILES_KEY, "500");
    }
    if (splitSize != null) {
      tableDescriptor.setValue(StripeStoreConfig.SIZE_TO_SPLIT_KEY, splitSize.toString());
    }
    if (splitParts != null) {
      tableDescriptor.setValue(StripeStoreConfig.SPLIT_PARTS_KEY, splitParts.toString());
    }
  } else {
    tableDescriptor.setValue(HStore.BLOCKING_STOREFILES_KEY, "10"); // default
  }
  return tableDescriptor;
}
 
Example #11
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
@Test
public void testSanityCheckMinVersion() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("MIN_VERSION > 0 is not supported for FIFO compaction");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1)
          .setMinVersions(1).build())
      .build();
  TEST_UTIL.getAdmin().createTable(desc);
}
 
Example #12
Source File: LookupTableToHFileJob.java    From kylin with Apache License 2.0
/**
 * Creates the HBase table that backs the given lookup source table.
 *
 * @param sourceTableName name of the source lookup table
 * @param sourceTable readable view of the source table, used to compute its signature and size
 * @param kylinConfig current Kylin configuration
 * @return Pair of HTable name and shard number
 * @throws IOException if the HBase table cannot be created
 */
private Pair<String, Integer> createHTable(String sourceTableName, IReadableTable sourceTable,
        KylinConfig kylinConfig) throws IOException {
    TableSignature signature = sourceTable.getSignature();
    int shardNum = calculateShardNum(kylinConfig, signature.getSize());
    Connection conn = getHBaseConnection(kylinConfig);
    Admin admin = conn.getAdmin();
    String hTableName = genHTableName(kylinConfig, admin, sourceTableName);

    TableName tableName = TableName.valueOf(hTableName);
    HTableDescriptor hTableDesc = new HTableDescriptor(tableName);
    hTableDesc.setCompactionEnabled(false);
    hTableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());
    hTableDesc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
    hTableDesc.setValue(IRealizationConstants.HTableCreationTime, String.valueOf(System.currentTimeMillis()));
    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        hTableDesc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    HColumnDescriptor cf = CubeHTableUtil.createColumnFamily(kylinConfig, HBaseLookupRowEncoder.CF_STRING, false);
    hTableDesc.addFamily(cf);

    try {
        if (shardNum > 1) {
            admin.createTable(hTableDesc, getSplitsByShardNum(shardNum));
        } else {
            admin.createTable(hTableDesc);
        }
    } finally {
        IOUtils.closeQuietly(admin);
    }
    return new Pair<>(hTableName, shardNum);
}
 
Example #13
Source File: LookupTableToHFileJob.java    From kylin-on-parquet-v2 with Apache License 2.0
/**
 * Creates the HBase table that backs the given lookup source table.
 *
 * @param sourceTableName name of the source lookup table
 * @param sourceTable readable view of the source table, used to compute its signature and size
 * @param kylinConfig current Kylin configuration
 * @return Pair of HTable name and shard number
 * @throws IOException if the HBase table cannot be created
 */
private Pair<String, Integer> createHTable(String sourceTableName, IReadableTable sourceTable,
        KylinConfig kylinConfig) throws IOException {
    TableSignature signature = sourceTable.getSignature();
    int shardNum = calculateShardNum(kylinConfig, signature.getSize());
    Connection conn = getHBaseConnection(kylinConfig);
    Admin admin = conn.getAdmin();
    String hTableName = genHTableName(kylinConfig, admin, sourceTableName);

    TableName tableName = TableName.valueOf(hTableName);
    HTableDescriptor hTableDesc = new HTableDescriptor(tableName);
    hTableDesc.setCompactionEnabled(false);
    hTableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());
    hTableDesc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
    hTableDesc.setValue(IRealizationConstants.HTableCreationTime, String.valueOf(System.currentTimeMillis()));
    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        hTableDesc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    HColumnDescriptor cf = CubeHTableUtil.createColumnFamily(kylinConfig, HBaseLookupRowEncoder.CF_STRING, false);
    hTableDesc.addFamily(cf);

    try {
        if (shardNum > 1) {
            admin.createTable(hTableDesc, getSplitsByShardNum(shardNum));
        } else {
            admin.createTable(hTableDesc);
        }
    } finally {
        IOUtils.closeQuietly(admin);
    }
    return new Pair<>(hTableName, shardNum);
}
 
Example #14
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
/**
 * Unit test for HBASE-21504
 */
@Test
public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
  TableName tableName = TableName.valueOf("testFIFOCompactionPolicyExpiredEmptyHFiles");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
  Table table = TEST_UTIL.createTable(desc, null);
  long ts = System.currentTimeMillis() - 10 * 1000;
  Put put =
      new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
  table.put(put);
  TEST_UTIL.getAdmin().flush(tableName); // HFile-0
  put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
  table.put(put);
  final int testWaitTimeoutMs = 20000;
  TEST_UTIL.getAdmin().flush(tableName); // HFile-1

  HStore store = Preconditions.checkNotNull(getStoreWithName(tableName));
  Assert.assertEquals(2, store.getStorefilesCount());

  TEST_UTIL.getAdmin().majorCompact(tableName);
  TEST_UTIL.waitFor(testWaitTimeoutMs,
      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);

  Assert.assertEquals(1, store.getStorefilesCount());
  HStoreFile sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
  Assert.assertEquals(0, sf.getReader().getEntries());

  put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
  table.put(put);
  TEST_UTIL.getAdmin().flush(tableName); // HFile-2
  Assert.assertEquals(2, store.getStorefilesCount());

  TEST_UTIL.getAdmin().majorCompact(tableName);
  TEST_UTIL.waitFor(testWaitTimeoutMs,
      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);

  Assert.assertEquals(1, store.getStorefilesCount());
  sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
  Assert.assertEquals(0, sf.getReader().getEntries());
}
 
Example #15
Source File: CubeHTableUtil.java    From kylin-on-parquet-v2 with Apache License 2.0
public static void createHTable(CubeSegment cubeSegment, byte[][] splitKeys) throws IOException {
    String tableName = cubeSegment.getStorageLocationIdentifier();
    CubeInstance cubeInstance = cubeSegment.getCubeInstance();
    CubeDesc cubeDesc = cubeInstance.getDescriptor();
    KylinConfig kylinConfig = cubeDesc.getConfig();

    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(cubeSegment.getStorageLocationIdentifier()));
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
    tableDesc.setValue(IRealizationConstants.HTableCreationTime, String.valueOf(System.currentTimeMillis()));

    if (!StringUtils.isEmpty(kylinConfig.getKylinOwner())) {
        //HTableOwner is the team that provides kylin service
        tableDesc.setValue(IRealizationConstants.HTableOwner, kylinConfig.getKylinOwner());
    }

    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        tableDesc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    //HTableUser is the cube owner, which will be the "user"
    tableDesc.setValue(IRealizationConstants.HTableUser, cubeInstance.getOwner());

    tableDesc.setValue(IRealizationConstants.HTableSegmentTag, cubeSegment.toString());

    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin admin = conn.getAdmin();

    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = createColumnFamily(kylinConfig, cfDesc.getName(), cfDesc.isMemoryHungry());
            tableDesc.addFamily(cf);
        }

        if (admin.tableExists(TableName.valueOf(tableName))) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        admin.createTable(tableDesc, splitKeys);
        Preconditions.checkArgument(admin.isTableAvailable(TableName.valueOf(tableName)), "table " + tableName + " created, but is not available due to some reasons");
        logger.info("create hbase table " + tableName + " done.");
    } finally {
        IOUtils.closeQuietly(admin);
    }

}
 
Example #16
Source File: IntegrationTestRegionReplicaPerf.java    From hbase with Apache License 2.0
public void test() throws Exception {
  int maxIters = 3;
  String replicas = "--replicas=" + replicaCount;
  // TODO: splits disabled until "phase 2" is complete.
  String splitPolicy = "--splitPolicy=" + DisabledRegionSplitPolicy.class.getName();
  String writeOpts = format("%s --nomapred --table=%s --presplit=16 sequentialWrite 4",
    splitPolicy, tableName);
  String readOpts =
    format("--nomapred --table=%s --latency --sampleRate=0.1 randomRead 4", tableName);
  String replicaReadOpts = format("%s %s", replicas, readOpts);

  ArrayList<TimingResult> resultsWithoutReplicas = new ArrayList<>(maxIters);
  ArrayList<TimingResult> resultsWithReplicas = new ArrayList<>(maxIters);

  // create/populate the table, replicas disabled
  LOG.debug("Populating table.");
  new PerfEvalCallable(util.getAdmin(), writeOpts).call();

  // one last sanity check, then send in the clowns!
  assertEquals("Table must be created with DisabledRegionSplitPolicy. Broken test.",
      DisabledRegionSplitPolicy.class.getName(),
      util.getAdmin().getDescriptor(tableName).getRegionSplitPolicyClassName());
  startMonkey();

  // collect a baseline without region replicas.
  for (int i = 0; i < maxIters; i++) {
    LOG.debug("Launching non-replica job " + (i + 1) + "/" + maxIters);
    resultsWithoutReplicas.add(new PerfEvalCallable(util.getAdmin(), readOpts).call());
    // TODO: sleep to let cluster stabilize, though monkey continues. is it necessary?
    Thread.sleep(5000L);
  }

  // disable monkey, enable region replicas, enable monkey
  cleanUpMonkey("Altering table.");
  LOG.debug("Altering " + tableName + " replica count to " + replicaCount);
  IntegrationTestingUtility.setReplicas(util.getAdmin(), tableName, replicaCount);
  setUpMonkey();
  startMonkey();

  // run test with region replicas.
  for (int i = 0; i < maxIters; i++) {
    LOG.debug("Launching replica job " + (i + 1) + "/" + maxIters);
    resultsWithReplicas.add(new PerfEvalCallable(util.getAdmin(), replicaReadOpts).call());
    // TODO: sleep to let cluster stabilize, though monkey continues. is it necessary?
    Thread.sleep(5000L);
  }

  // compare the average of the stdev and 99.99pct across runs to determine if region replicas
  // are having an overall improvement on response variance experienced by clients.
  double withoutReplicasStdevMean =
      calcMean("withoutReplicas", Stat.STDEV, resultsWithoutReplicas);
  double withoutReplicas9999Mean =
      calcMean("withoutReplicas", Stat.FOUR_9S, resultsWithoutReplicas);
  double withReplicasStdevMean =
      calcMean("withReplicas", Stat.STDEV, resultsWithReplicas);
  double withReplicas9999Mean =
      calcMean("withReplicas", Stat.FOUR_9S, resultsWithReplicas);

  LOG.info(MoreObjects.toStringHelper(this)
    .add("withoutReplicas", resultsWithoutReplicas)
    .add("withReplicas", resultsWithReplicas)
    .add("withoutReplicasStdevMean", withoutReplicasStdevMean)
    .add("withoutReplicas99.99Mean", withoutReplicas9999Mean)
    .add("withReplicasStdevMean", withReplicasStdevMean)
    .add("withReplicas99.99Mean", withReplicas9999Mean)
    .toString());

  assertTrue(
    "Running with region replicas under chaos should have less request variance than without. "
    + "withReplicas.stdev.mean: " + withReplicasStdevMean + "ms "
    + "withoutReplicas.stdev.mean: " + withoutReplicasStdevMean + "ms.",
    withReplicasStdevMean <= withoutReplicasStdevMean);
  assertTrue(
      "Running with region replicas under chaos should improve 99.99pct latency. "
          + "withReplicas.99.99.mean: " + withReplicas9999Mean + "ms "
          + "withoutReplicas.99.99.mean: " + withoutReplicas9999Mean + "ms.",
      withReplicas9999Mean <= withoutReplicas9999Mean);
}
 
Example #17
Source File: CreateTables.java    From hadoop-arch-book with Apache License 2.0
private static void createProfileCacheTable(HBaseAdmin admin) throws IOException {
  HTableDescriptor tableDescriptor = new HTableDescriptor(HBaseTableMetaModel.profileCacheTableName);

  HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(HBaseTableMetaModel.profileCacheColumnFamily);
  hColumnDescriptor.setMaxVersions(1);


  tableDescriptor.addFamily(hColumnDescriptor);
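  // Regions are pre-split by salt below; disable any further automatic splitting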
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());

  byte[][] splitKeys = new byte[HBaseTableMetaModel.profileCacheNumberOfProfileCacheSalts][];

  for (int i = 0; i < HBaseTableMetaModel.profileCacheNumberOfProfileCacheSalts; i++) {
    char salt = (char)('A' + i);
    splitKeys[i] = Bytes.toBytes(salt);
  }

  admin.createTable(tableDescriptor, splitKeys);
}