org.apache.hadoop.hbase.regionserver.HStore Java Examples

The following examples show how to use org.apache.hadoop.hbase.regionserver.HStore. They are drawn from the Apache HBase and Apache Phoenix source trees; the originating source file and its license are noted above each example.
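Most of the snippets below follow the same basic pattern: get hold of an HRegion for a table from a test cluster, look up the HStore for a column family, and then drive or inspect the store through its public accessors. The fragment below is a minimal sketch of that pattern; it is not taken from any of the projects, and TEST_UTIL, tableName and family are assumed to come from a mini-cluster test setup like the ones used in the examples.

// Minimal, hypothetical sketch: locate the HStore for a column family and inspect it.
// TEST_UTIL, tableName and family are assumed to be provided by the surrounding test.
HRegion region = TEST_UTIL.getHBaseCluster().getRegions(tableName).get(0);
HStore store = region.getStore(family);
int storeFileCount = store.getStorefilesCount();
ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor();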
Example #1
Source File: TestFromClientSideScanExcpetion.java    From hbase with Apache License 2.0
@Override
protected List<KeyValueScanner> selectScannersFrom(HStore store,
    List<? extends KeyValueScanner> allScanners) {
  List<KeyValueScanner> scanners = super.selectScannersFrom(store, allScanners);
  List<KeyValueScanner> newScanners = new ArrayList<>(scanners.size());
  for (KeyValueScanner scanner : scanners) {
    newScanners.add(new DelegatingKeyValueScanner(scanner) {
      @Override
      public boolean reseek(Cell key) throws IOException {
        if (ON.get()) {
          REQ_COUNT.incrementAndGet();
          if (!THROW_ONCE.get() || REQ_COUNT.get() == 1) {
            if (IS_DO_NOT_RETRY.get()) {
              throw new DoNotRetryIOException("Injected exception");
            } else {
              throw new IOException("Injected exception");
            }
          }
        }
        return super.reseek(key);
      }
    });
  }
  return newScanners;
}
 
Example #2
Source File: TestRegionCoprocessorHost.java    From hbase with Apache License 2.0
@Test
public void testPreStoreScannerOpen() throws IOException {

  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  Scan scan = new Scan();
  scan.setTimeRange(TimeRange.INITIAL_MIN_TIMESTAMP, TimeRange.INITIAL_MAX_TIMESTAMP);
  assertTrue("Scan is not for all time", scan.getTimeRange().isAllTime());
  // SimpleRegionObserver is set to update the ScanInfo parameters if the passed-in scan
  // is for all time. This lets us exercise both that the Scan is wired up properly in the
  // coprocessor and that we can customize the metadata.

  ScanInfo oldScanInfo = getScanInfo();

  HStore store = mock(HStore.class);
  when(store.getScanInfo()).thenReturn(oldScanInfo);
  ScanInfo newScanInfo = host.preStoreScannerOpen(store, scan);

  verifyScanInfo(newScanInfo);
}
 
Example #3
Source File: TestMasterRegionFlush.java    From hbase with Apache License 2.0
@Before
public void setUp() throws IOException {
  conf = HBaseConfiguration.create();
  region = mock(HRegion.class);
  HStore store = mock(HStore.class);
  when(store.getStorefilesCount()).thenReturn(1);
  when(region.getStores()).thenReturn(Collections.singletonList(store));
  when(region.getRegionInfo())
    .thenReturn(RegionInfoBuilder.newBuilder(TableName.valueOf("hbase:local")).build());
  flushCalled = new AtomicInteger(0);
  memstoreHeapSize = new AtomicLong(0);
  memstoreOffHeapSize = new AtomicLong(0);
  when(region.getMemStoreHeapSize()).thenAnswer(invocation -> memstoreHeapSize.get());
  when(region.getMemStoreOffHeapSize()).thenAnswer(invocation -> memstoreOffHeapSize.get());
  when(region.flush(anyBoolean())).thenAnswer(invocation -> {
    assertTrue(invocation.getArgument(0));
    memstoreHeapSize.set(0);
    memstoreOffHeapSize.set(0);
    flushCalled.incrementAndGet();
    return null;
  });
}
 
Example #4
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
private HStore prepareData() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  TimeOffsetEnvironmentEdge edge =
      (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      ThreadLocalRandom.current().nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
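    // The column family TTL is 1 second, so advancing the injected clock by just over a
    // second makes every file flushed so far eligible for FIFO expiration.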
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
 
Example #5
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
@Test
public void testPurgeExpiredFiles() throws Exception {
  HStore store = prepareData();
  assertEquals(10, store.getStorefilesCount());
  TEST_UTIL.getAdmin().majorCompact(tableName);
  TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      return store.getStorefilesCount() == 1;
    }

    @Override
    public String explainFailure() throws Exception {
      return "The store file count " + store.getStorefilesCount() + " is still greater than 1";
    }
  });
}
 
Example #6
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
@Test
public void testSanityCheckBlockingStoreFiles() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("Blocking file count 'hbase.hstore.blockingStoreFiles'");
  error.expectMessage("is below recommended minimum of 1000 for column family");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-BlockingStoreFiles");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
      .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
      .setValue(HStore.BLOCKING_STOREFILES_KEY, "10")
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
      .build();
  TEST_UTIL.getAdmin().createTable(desc);
}
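For contrast, a descriptor along the lines of the following sketch should satisfy the sanity check, since it raises hbase.hstore.blockingStoreFiles to the minimum of 1000 quoted in the expected error message. This variant is illustrative only and is not part of the test.

TableDescriptor passingDesc = TableDescriptorBuilder.newBuilder(tableName)
    .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      FIFOCompactionPolicy.class.getName())
    .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      DisabledRegionSplitPolicy.class.getName())
    // at (or above) the recommended minimum named in the sanity-check error message
    .setValue(HStore.BLOCKING_STOREFILES_KEY, "1000")
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
    .build();
TEST_UTIL.getAdmin().createTable(passingDesc);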
 
Example #7
Source File: TestCompactionWithThroughputController.java    From hbase with Apache License 2.0
private HStore prepareData() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  Table table = TEST_UTIL.createTable(tableName, family);
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      ThreadLocalRandom.current().nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
  }
  return getStoreWithName(tableName);
}
 
Example #8
Source File: TestCompactionWithThroughputController.java    From hbase with Apache License 2.0
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    NoLimitThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Example #9
Source File: MasterRegionFlusherAndCompactor.java    From hbase with Apache License 2.0
private void moveHFileToGlobalArchiveDir() throws IOException {
  FileSystem fs = region.getRegionFileSystem().getFileSystem();
  for (HStore store : region.getStores()) {
    store.closeAndArchiveCompactedFiles();
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, region.getRegionInfo(),
      store.getColumnFamilyDescriptor().getName());
    Path globalStoreArchiveDir = HFileArchiveUtil.getStoreArchivePathForArchivePath(
      globalArchivePath, region.getRegionInfo(), store.getColumnFamilyDescriptor().getName());
    try {
      MasterRegionUtils.moveFilesUnderDir(fs, storeArchiveDir, globalStoreArchiveDir,
        archivedHFileSuffix);
    } catch (IOException e) {
      LOG.warn("Failed to move archived hfiles from {} to global dir {}", storeArchiveDir,
        globalStoreArchiveDir, e);
    }
  }
}
 
Example #10
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
/**
 * Do a small get/scan against one store. This is required because the store
 * has no actual methods for querying itself and relies on StoreScanner.
 */
public static List<Cell> getFromStoreFile(HStore store,
                                              Get get) throws IOException {
  Scan scan = new Scan(get);
  InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
      // readpoint 0.
      0);

  List<Cell> result = new ArrayList<>();
  scanner.next(result);
  if (!result.isEmpty()) {
    // verify that we are on the row we want:
    Cell kv = result.get(0);
    if (!CellUtil.matchingRows(kv, get.getRow())) {
      result.clear();
    }
  }
  scanner.close();
  return result;
}
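A possible way to call this helper, sketched here with a placeholder row key and qualifier, is to read a single row of the store's column family and assert on the returned cells.

// Hypothetical usage of getFromStoreFile; "row1" and "q1" are placeholder values.
Get get = new Get(Bytes.toBytes("row1"));
get.addColumn(store.getColumnFamilyDescriptor().getName(), Bytes.toBytes("q1"));
List<Cell> cells = HBaseTestingUtility.getFromStoreFile(store, get);
assertFalse("Expected at least one cell for row1", cells.isEmpty());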
 
Example #11
Source File: DefaultMobStoreCompactor.java    From hbase with Apache License 2.0
public DefaultMobStoreCompactor(Configuration conf, HStore store) {
  super(conf, store);
  // The mob cells reside in the mob-enabled column family which is held by HMobStore.
  // During the compaction, the compactor reads the cells from the mob files and
  // probably creates new mob files. All of these operations are included in HMobStore,
  // so we need to cast the Store to HMobStore.
  if (!(store instanceof HMobStore)) {
    throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
  }
  this.mobStore = (HMobStore) store;
  this.mobSizeThreshold = store.getColumnFamilyDescriptor().getMobThreshold();
  this.ioOptimizedMode = conf.get(MobConstants.MOB_COMPACTION_TYPE_KEY,
    MobConstants.DEFAULT_MOB_COMPACTION_TYPE).
      equals(MobConstants.OPTIMIZED_MOB_COMPACTION_TYPE);

}
 
Example #12
Source File: TestRefreshHFilesEndpoint.java    From hbase with Apache License 2.0
@Override
public List<HStore> getStores() {
  List<HStore> list = new ArrayList<>(stores.size());
  /*
   * This is used to trigger the custom (faulty) implementation
   * of the refresh HFiles API.
   */
  try {
    if (this.store == null) {
      store = new HStoreWithFaultyRefreshHFilesAPI(this,
          ColumnFamilyDescriptorBuilder.of(FAMILY), this.conf);
    }
    list.add(store);
  } catch (IOException ioe) {
    LOG.info("Couldn't instantiate custom store implementation", ioe);
  }

  list.addAll(stores.values());
  return list;
}
 
Example #13
Source File: TestFlushWithThroughputController.java    From hbase with Apache License 2.0
/**
 * Writes Puts to the table and flushes a few times.
 * @return {@link Pair} of (throughput, duration).
 */
private Pair<Double, Long> generateAndFlushData(Table table) throws IOException {
  // Internally, throughput is controlled after every cell write, so keep the value size small
  // for better control.
  final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024;
  Random rand = new Random();
  long duration = 0;
  for (int i = 0; i < NUM_FLUSHES; i++) {
    // Write about 10 MB (10x the throughput rate) per iteration.
    for (int j = 0; j < NUM_PUTS; j++) {
      byte[] value = new byte[VALUE_SIZE];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    long startTime = System.nanoTime();
    hbtu.getAdmin().flush(tableName);
    duration += System.nanoTime() - startTime;
  }
  HStore store = getStoreWithName(tableName);
  assertEquals(NUM_FLUSHES, store.getStorefilesCount());
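  // Flush throughput in bytes per second: total size of the flushed store files divided by
  // the cumulative time spent in flushes.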
  double throughput = (double)store.getStorefilesSize()
      / TimeUnit.NANOSECONDS.toSeconds(duration);
  return new Pair<>(throughput, duration);
}
 
Example #14
Source File: TestZooKeeperTableArchiveClient.java    From hbase with Apache License 2.0
private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
  // create two hfiles in the region
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);

  HStore s = region.getStore(family);
  int count = s.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
    count >= 2);

  // compact the two files into one file to get files in the archive
  LOG.debug("Compacting stores");
  region.compact(true);
}
 
Example #15
Source File: MutableIndexExtendedIT.java    From phoenix with Apache License 2.0
@Test(timeout = 120000)
public void testCompactNonPhoenixTable() throws Exception {
    if (localIndex || tableDDLOptions.contains("TRANSACTIONAL=true")) return;

    try (Connection conn = getConnection()) {
        // create a vanilla HBase table (non-Phoenix)
        String randomTable = generateUniqueName();
        TableName hbaseTN = TableName.valueOf(randomTable);
        byte[] famBytes = Bytes.toBytes("fam");
        Table hTable = getUtility().createTable(hbaseTN, famBytes);
        TestUtil.addCoprocessor(conn, randomTable, UngroupedAggregateRegionObserver.class);
        Put put = new Put(Bytes.toBytes("row"));
        byte[] value = new byte[1];
        Bytes.random(value);
        put.addColumn(famBytes, Bytes.toBytes("colQ"), value);
        hTable.put(put);

        // major compaction shouldn't cause a timeout or RS abort
        List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
        HRegion hRegion = regions.get(0);
        hRegion.flush(true);
        HStore store = hRegion.getStore(famBytes);
        store.triggerMajorCompaction();
        store.compactRecentForTestingAssumingDefaultPolicy(1);

        // we should be able to compact syscat itself as well
        regions =
                getUtility().getHBaseCluster().getRegions(
                        TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
        hRegion = regions.get(0);
        hRegion.flush(true);
        store = hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
        store.triggerMajorCompaction();
        store.compactRecentForTestingAssumingDefaultPolicy(1);
    }
}
 
Example #16
Source File: TestWriteHeavyIncrementObserverWithMemStoreCompaction.java    From hbase with Apache License 2.0
@Test
public void test() throws Exception {
  // sleep every 10 loops to give memstore compaction enough time to finish before reaching the
  // flush size.
  doIncrement(10);
  assertSum();
  HStore store = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0).getStore(FAMILY);
  // should have no store files created as we have done aggregating all in memory
  assertEquals(0, store.getStorefilesCount());
}
 
Example #17
Source File: TestCompactionWithThroughputController.java    From hbase with Apache License 2.0
private HStore getStoreWithName(TableName tableName) {
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region region : hrs.getRegions(tableName)) {
      return ((HRegion) region).getStores().iterator().next();
    }
  }
  return null;
}
 
Example #18
Source File: StripeCompactionsPerformanceEvaluation.java    From hbase with Apache License 2.0
private TableDescriptorBuilder.ModifyableTableDescriptor createHtd(boolean isStripe)
    throws Exception {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(TABLE_NAME);
  ColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(COLUMN_FAMILY);
  tableDescriptor.setColumnFamily(familyDescriptor);
  String noSplitsPolicy = DisabledRegionSplitPolicy.class.getName();
  tableDescriptor.setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, noSplitsPolicy);
  if (isStripe) {
    tableDescriptor.setValue(StoreEngine.STORE_ENGINE_CLASS_KEY,
      StripeStoreEngine.class.getName());
    if (initialStripeCount != null) {
      tableDescriptor.setValue(
          StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialStripeCount.toString());
      tableDescriptor.setValue(
          HStore.BLOCKING_STOREFILES_KEY, Long.toString(10 * initialStripeCount));
    } else {
      tableDescriptor.setValue(HStore.BLOCKING_STOREFILES_KEY, "500");
    }
    if (splitSize != null) {
      tableDescriptor.setValue(StripeStoreConfig.SIZE_TO_SPLIT_KEY, splitSize.toString());
    }
    if (splitParts != null) {
      tableDescriptor.setValue(StripeStoreConfig.SPLIT_PARTS_KEY, splitParts.toString());
    }
  } else {
    tableDescriptor.setValue(HStore.BLOCKING_STOREFILES_KEY, "10"); // default
  }
  return tableDescriptor;
}
 
Example #19
Source File: TestFlushWithThroughputController.java    From hbase with Apache License 2.0
private HStore getStoreWithName(TableName tableName) {
  MiniHBaseCluster cluster = hbtu.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region region : hrs.getRegions(tableName)) {
      return ((HRegion) region).getStores().iterator().next();
    }
  }
  return null;
}
 
Example #20
Source File: TestCompactionWithThroughputController.java    From hbase with Apache License 2.0 5 votes vote down vote up
private long testCompactionWithThroughputLimit() throws Exception {
  long throughputLimit = 1024L * 1024;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.setLong(
    PressureAwareCompactionThroughputController
      .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    throughputLimit);
  conf.setLong(
    PressureAwareCompactionThroughputController
      .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    throughputLimit);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    long duration = System.currentTimeMillis() - startTime;
    double throughput = (double) store.getStorefilesSize() / duration * 1000;
    // Confirm that the speed limit works properly (not too fast, and also not too slow).
    // 20% is the max acceptable error rate.
    assertTrue(throughput < throughputLimit * 1.2);
    assertTrue(throughput > throughputLimit * 0.8);
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Example #21
Source File: IntegrationTestIngestStripeCompactions.java    From hbase with Apache License 2.0
@Override
protected void initTable() throws IOException {
  // Do the same as the LoadTestTool does, but with different table configuration.
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(getTablename());
  tableDescriptor.setValue(StoreEngine.STORE_ENGINE_CLASS_KEY,
    StripeStoreEngine.class.getName());
  tableDescriptor.setValue(HStore.BLOCKING_STOREFILES_KEY, "100");
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(
      HFileTestUtil.DEFAULT_COLUMN_FAMILY);
  HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(),
    tableDescriptor, familyDescriptor);
}
 
Example #22
Source File: TestRegionCoprocessorHost.java    From hbase with Apache License 2.0
@Test
public void testPreFlushScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo oldScanInfo = getScanInfo();
  HStore store = mock(HStore.class);
  when(store.getScanInfo()).thenReturn(oldScanInfo);
  ScanInfo newScanInfo = host.preFlushScannerOpen(store, mock(FlushLifeCycleTracker.class));
  verifyScanInfo(newScanInfo);
}
 
Example #23
Source File: TestRegionCoprocessorHost.java    From hbase with Apache License 2.0
@Test
public void testPreCompactScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo oldScanInfo = getScanInfo();
  HStore store = mock(HStore.class);
  when(store.getScanInfo()).thenReturn(oldScanInfo);
  ScanInfo newScanInfo = host.preCompactScannerOpen(store, ScanType.COMPACT_DROP_DELETES,
    mock(CompactionLifeCycleTracker.class), mock(CompactionRequest.class), mock(User.class));
  verifyScanInfo(newScanInfo);
}
 
Example #24
Source File: TestFIFOCompactionPolicy.java    From hbase with Apache License 2.0
private HStore getStoreWithName(TableName tableName) {
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (HRegion region : hrs.getRegions(tableName)) {
      return region.getStores().iterator().next();
    }
  }
  return null;
}
 
Example #25
Source File: TestBlockEvictionFromClient.java    From hbase with Apache License 2.0
private BlockCache setCacheProperties(HRegion region) {
  Iterator<HStore> strItr = region.getStores().iterator();
  BlockCache cache = null;
  while (strItr.hasNext()) {
    HStore store = strItr.next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    // Use the last one
    cache = cacheConf.getBlockCache().get();
  }
  return cache;
}
 
Example #26
Source File: TestCatalogJanitor.java    From hbase with Apache License 2.0
private Path createReferences(final MasterServices services, final TableDescriptor td,
  final RegionInfo parent, final RegionInfo daughter, final byte[] midkey, final boolean top)
  throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = CommonFSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter, td.getColumnFamilies()[0].getName());
  Reference ref =
    top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
 
Example #27
Source File: TestBlockEvictionFromClient.java    From hbase with Apache License 2.0
private void waitForStoreFileCount(HStore store, int count, int timeout)
    throws InterruptedException {
  long start = System.currentTimeMillis();
  while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) {
    Thread.sleep(100);
  }
  System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
      store.getStorefilesCount());
  assertEquals(count, store.getStorefilesCount());
}
 
Example #28
Source File: TestCatalogJanitor.java    From hbase with Apache License 2.0
/**
 * Test clearing a split parent.
 */
@Test
public void testCleanParent() throws IOException, InterruptedException {
  TableDescriptor td = createTableDescriptorForCurrentMethod();
  // Create regions.
  RegionInfo parent =
    createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  RegionInfo splita =
    createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  RegionInfo splitb =
    createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, that we do not remove the parent.
  Result r = createResult(parent, splita, splitb);
  // Add a reference under splitA directory so we don't clear out the parent.
  Path rootdir = this.masterServices.getMasterFileSystem().getRootDir();
  Path tabledir = CommonFSUtils.getTableDir(rootdir, td.getTableName());
  Path parentdir = new Path(tabledir, parent.getEncodedName());
  Path storedir = HStore.getStoreHomedir(tabledir, splita, td.getColumnFamilies()[0].getName());
  Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem();
  Path path = ref.write(fs, p);
  assertTrue(fs.exists(path));
  LOG.info("Created reference " + path);
  // Add a parentdir for kicks so can check it gets removed by the catalogjanitor.
  fs.mkdirs(parentdir);
  assertFalse(this.janitor.cleanParent(parent, r));
  ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
  assertTrue(fs.exists(parentdir));
  // Remove the reference file and try again.
  assertTrue(fs.delete(p, true));
  assertTrue(this.janitor.cleanParent(parent, r));
  // Parent cleanup is run async as a procedure. Make sure parentdir is removed.
  ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
  assertTrue(!fs.exists(parentdir));
}
 
Example #29
Source File: SpaceQuotaHelperForTests.java    From hbase with Apache License 2.0
@Override
public boolean evaluate() throws Exception {
  for (HRegion region : cluster.getRegions(tn)) {
    for (HStore store : region.getStores()) {
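      // Compacted store files are cleaned up asynchronously; the predicate is not
      // satisfied while any of them remain.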
      Collection<HStoreFile> files =
          store.getStoreEngine().getStoreFileManager().getCompactedfiles();
      if (null != files && !files.isEmpty()) {
        LOG.debug(region.getRegionInfo().getEncodedName() + " still has compacted files");
        return false;
      }
    }
  }
  return true;
}
 
Example #30
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
/**
 * Do a small get/scan against one store. This is required because the store
 * has no actual methods for querying itself and relies on StoreScanner.
 */
public static List<Cell> getFromStoreFile(HStore store,
                                              byte [] row,
                                              NavigableSet<byte[]> columns
                                              ) throws IOException {
  Get get = new Get(row);
  Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
  s.put(store.getColumnFamilyDescriptor().getName(), columns);

  return getFromStoreFile(store,get);
}