Java Code Examples for org.apache.hadoop.hbase.regionserver.HRegion#getStore()

The following examples show how to use org.apache.hadoop.hbase.regionserver.HRegion#getStore(). Each example lists its original source file, the project it comes from, and that project's license.
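
At its core, HRegion#getStore(byte[] family) returns the HStore for a column family, which the caller can then flush, inspect, or compact. Below is a minimal sketch of the recurring pattern in these examples; it assumes HBase 2.x (where getStore returns HStore), and the helper name and family value are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;

// Hypothetical helper: flush a region and report how many store files
// the given column family now holds on disk.
private static int flushAndCountStoreFiles(HRegion region, byte[] family) throws IOException {
  // force in-memory edits out to an HFile so the store has files on disk
  region.flush(true);
  HStore store = region.getStore(family);
  return store.getStorefilesCount();
}

For example, calling flushAndCountStoreFiles(region, Bytes.toBytes("fam")) after a few puts should return at least 1.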
Example 1
Source File: TestZooKeeperTableArchiveClient.java    From hbase with Apache License 2.0
private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
  // create two hfiles in the region
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);

  HStore s = region.getStore(family);
  int count = s.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
    count >= 2);

  // compact the two files into one file to get files in the archive
  LOG.debug("Compacting stores");
  region.compact(true);
}
 
Example 2
Source File: HFileArchiveTestingUtil.java    From hbase with Apache License 2.0
public static Path getStoreArchivePath(HBaseTestingUtility util, String tableName,
    byte[] storeName) throws IOException {
  byte[] table = Bytes.toBytes(tableName);
  // get the RS and region serving our table
  List<HRegion> servingRegions = util.getHBaseCluster().getRegions(table);
  HRegion region = servingRegions.get(0);

  // get the store for our family and compute its archive path
  Store store = region.getStore(storeName);
  return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
}
 
Example 3
Source File: MutableIndexExtendedIT.java    From phoenix with Apache License 2.0
@Test(timeout = 120000)
public void testCompactNonPhoenixTable() throws Exception {
    if (localIndex || tableDDLOptions.contains("TRANSACTIONAL=true")) return;

    try (Connection conn = getConnection()) {
        // create a vanilla HBase table (non-Phoenix)
        String randomTable = generateUniqueName();
        TableName hbaseTN = TableName.valueOf(randomTable);
        byte[] famBytes = Bytes.toBytes("fam");
        Table hTable = getUtility().createTable(hbaseTN, famBytes);
        TestUtil.addCoprocessor(conn, randomTable, UngroupedAggregateRegionObserver.class);
        Put put = new Put(Bytes.toBytes("row"));
        byte[] value = new byte[1];
        Bytes.random(value);
        put.addColumn(famBytes, Bytes.toBytes("colQ"), value);
        hTable.put(put);

        // major compaction shouldn't cause a timeout or RS abort
        List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
        HRegion hRegion = regions.get(0);
        hRegion.flush(true);
        HStore store = hRegion.getStore(famBytes);
        store.triggerMajorCompaction();
        store.compactRecentForTestingAssumingDefaultPolicy(1);

        // we should be able to compact syscat itself as well
        regions =
                getUtility().getHBaseCluster().getRegions(
                        TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
        hRegion = regions.get(0);
        hRegion.flush(true);
        store = hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
        store.triggerMajorCompaction();
        store.compactRecentForTestingAssumingDefaultPolicy(1);
    }
}
 
Example 4
Source File: BytesCopyTaskSplitter.java    From spliceengine with GNU Affero General Public License v3.0
public static List<byte[]> getCutPoints(HRegion region, byte[] start, byte[] end,
                                        int requestedSplits, long bytesPerSplit) throws IOException {
    Store store = null;
    try {
        store = region.getStore(SIConstants.DEFAULT_FAMILY_BYTES);
        HRegionUtil.lockStore(store);
        return HRegionUtil.getCutpoints(store, start, end, requestedSplits, bytesPerSplit);
    } catch (Throwable t) {
        throw Exceptions.getIOException(t);
    } finally {
        // guard against an NPE if getStore() threw before store was assigned
        if (store != null) {
            HRegionUtil.unlockStore(store);
        }
    }
}
 
Example 5
Source File: AbstractTestLogRolling.java    From hbase with Apache License 2.0
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
  Table table = null;

  // When the hbase:meta table can be opened, the region servers are running
  Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
  try {
    table = createTestTable(getName());

    server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
    HRegion region = server.getRegions(table.getName()).get(0);
    final WAL log = server.getWAL(region.getRegionInfo());
    Store s = region.getStore(HConstants.CATALOG_FAMILY);

    // Put some stuff into table, to make sure we have some files to compact.
    for (int i = 1; i <= 2; ++i) {
      doPut(table, i);
      admin.flush(table.getName());
    }
    doPut(table, 3); // don't flush yet, or compaction might trigger before we roll WAL
    assertEquals("Should have no WAL after initial writes", 0,
      AbstractFSWALProvider.getNumRolledLogFiles(log));
    assertEquals(2, s.getStorefilesCount());

    // Roll the log and compact table, to have compaction record in the 2nd WAL.
    log.rollWriter();
    assertEquals("Should have WAL; one table is not flushed", 1,
      AbstractFSWALProvider.getNumRolledLogFiles(log));
    admin.flush(table.getName());
    region.compact(false);
    // Wait for the compaction, in case the flush triggered it before us.
    Assert.assertNotNull(s);
    for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
      Threads.sleepWithoutInterrupt(200);
    }
    assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());

    // Write some value to the table so the WAL cannot be deleted until table is flushed.
    doPut(table, 0); // Now 2nd WAL will have both compaction and put record for table.
    log.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
    assertEquals("Should have WAL; one table is not flushed", 1,
      AbstractFSWALProvider.getNumRolledLogFiles(log));

    // Flush table to make latest WAL obsolete; write another record, and roll again.
    admin.flush(table.getName());
    doPut(table, 1);
    log.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
    assertEquals("Should have 1 WALs at the end", 1,
      AbstractFSWALProvider.getNumRolledLogFiles(log));
  } finally {
    if (t != null) t.close();
    if (table != null) table.close();
  }
}
 
Example 6
Source File: TestScannerSelectionUsingTTL.java    From hbase with Apache License 2.0
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  LruBlockCache cache = (LruBlockCache) BlockCacheFactory.createBlockCache(conf);

  TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE).setColumnFamily(
      ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES).setMaxVersions(Integer.MAX_VALUE)
          .setTimeToLive(TTL_SECONDS).build()).build();
  RegionInfo info = RegionInfoBuilder.newBuilder(TABLE).build();
  HRegion region = HBaseTestingUtility
      .createRegionAndWAL(info, TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, td, cache);

  long ts = EnvironmentEdgeManager.currentTime();
  long version = 0; // make sure each new set of Puts gets a new ts
  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
      version += TTL_MS;
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), ts + version,
                Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
    version++;
  }

  Scan scan = new Scan().readVersions(Integer.MAX_VALUE);
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    HStore store = region.getStore(FAMILY_BYTES);
    store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
  } else {
    region.compact(false);
  }

  HBaseTestingUtility.closeRegionAndWAL(region);
}