Java Code Examples for org.apache.hadoop.hbase.regionserver.HRegion#flush()

The following examples show how to use org.apache.hadoop.hbase.regionserver.HRegion#flush(). Each example is taken from an open-source project; the source file and project are noted above it.
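Before the project examples, here is a minimal sketch of the pattern they all share: write data with Put, then call HRegion#flush(true) to force the memstore to be written out as an HFile. This is an illustration under assumptions, not code from any project below: it assumes the HBase 2.x testing APIs used in the examples, and the table name "exampleTable", family "f", and method name putAndFlushSketch are placeholders.

public void putAndFlushSketch() throws IOException {
  // Standalone region backed by HBaseTestingUtility; no mini-cluster needed.
  HBaseTestingUtility util = new HBaseTestingUtility();
  byte[] family = Bytes.toBytes("f");
  HRegion region = util.createTestRegion("exampleTable",
      ColumnFamilyDescriptorBuilder.of(family));
  try {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v"));
    region.put(put);
    // true = force the flush even though the memstore is tiny; the
    // FlushResult reports whether a flush actually took place
    FlushResult result = region.flush(true);
    assertTrue(result.isFlushSucceeded());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}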
Example 1
Source File: TestRegionObserverScannerOpenHook.java    From hbase with Apache License 2.0
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
        + r, r.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 2
Source File: AbstractTestLogRolling.java    From hbase with Apache License 2.0
/**
 * Tests that logs are deleted
 */
@Test
public void testLogRolling() throws Exception {
  this.tableName = getName();
  // TODO: Why does writing this data take forever?
  startAndWriteData();
  RegionInfo region = server.getRegions(TableName.valueOf(tableName)).get(0).getRegionInfo();
  final WAL log = server.getWAL(region);
  LOG.info("after writing there are " + AbstractFSWALProvider.getNumRolledLogFiles(log) + " log files");
  assertLogFileSize(log);

  // flush all regions
  for (HRegion r : server.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }

  // Now roll the log
  log.rollWriter();

  int count = AbstractFSWALProvider.getNumRolledLogFiles(log);
  LOG.info("after flushing all regions and rolling logs there are " + count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
  assertLogFileSize(log);
}
 
Example 3
Source File: TestAsyncClusterAdminApi.java    From hbase with Apache License 2.0
@Test
public void testRollWALWALWriter() throws Exception {
  setUpforLogRolling();
  String className = this.getClass().getName();
  StringBuilder v = new StringBuilder(className);
  while (v.length() < 1000) {
    v.append(className);
  }
  byte[] value = Bytes.toBytes(v.toString());
  HRegionServer regionServer = startAndWriteData(tableName, value);
  LOG.info("after writing there are "
      + AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");

  // flush all regions
  for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }
  admin.rollWALWriter(regionServer.getServerName()).join();
  int count = AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null));
  LOG.info("after flushing all regions and rolling logs there are " +
      count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
 
Example 4
Source File: TestSimpleRegionNormalizerOnCluster.java    From hbase with Apache License 2.0
private static List<HRegion> generateTestData(final TableName tableName,
  final int... regionSizesMb) throws IOException {
  final List<HRegion> generatedRegions;
  final int numRegions = regionSizesMb.length;
  try (Table ignored = TEST_UTIL.createMultiRegionTable(tableName, FAMILY_NAME, numRegions)) {
    // Need to get sorted list of regions here
    generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
    generatedRegions.sort(Comparator.comparing(HRegion::getRegionInfo, RegionInfo.COMPARATOR));
    assertEquals(numRegions, generatedRegions.size());
    for (int i = 0; i < numRegions; i++) {
      HRegion region = generatedRegions.get(i);
      generateTestData(region, regionSizesMb[i]);
      region.flush(true);
    }
  }
  return generatedRegions;
}
 
Example 5
Source File: TestAdmin2.java    From hbase with Apache License 2.0
@Test
public void testWALRollWriting() throws Exception {
  setUpforLogRolling();
  String className = this.getClass().getName();
  StringBuilder v = new StringBuilder(className);
  while (v.length() < 1000) {
    v.append(className);
  }
  byte[] value = Bytes.toBytes(v.toString());
  HRegionServer regionServer = startAndWriteData(TableName.valueOf(name.getMethodName()), value);
  LOG.info("after writing there are "
      + AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");

  // flush all regions
  for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }
  ADMIN.rollWALWriter(regionServer.getServerName());
  int count = AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null));
  LOG.info("after flushing all regions and rolling logs there are " +
      count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
 
Example 6
Source File: MiniHBaseCluster.java    From hbase with Apache License 2.0
private void executeFlush(HRegion region) throws IOException {
  // retry 5 times if we can not flush
  for (int i = 0; i < 5; i++) {
    FlushResult result = region.flush(true);
    if (result.getResult() != FlushResult.Result.CANNOT_FLUSH) {
      return;
    }
    Threads.sleep(1000);
  }
}
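Note that flush(true) requests a flush even when the memstore is small, but it can still return CANNOT_FLUSH when the region is not in a writable state (for example, while it is closing), which is why this helper retries with a short sleep instead of failing on the first attempt.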
 
Example 7
Source File: TestRegionObserverInterface.java    From hbase with Apache License 2.0
@Test
public void testPreWALAppendNotCalledOnMetaEdit() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() +
      "." + name.getMethodName());
  TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
  ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY);
  tdBuilder.setColumnFamily(cfBuilder.build());
  tdBuilder.setCoprocessor(SimpleRegionObserver.class.getName());
  TableDescriptor td = tdBuilder.build();
  Table table = util.createTable(td, new byte[][] { A, B, C });

  PreWALAppendWALActionsListener listener = new PreWALAppendWALActionsListener();
  List<HRegion> regions = util.getHBaseCluster().getRegions(tableName);
  //should be only one region
  HRegion region = regions.get(0);

  region.getWAL().registerWALActionsListener(listener);
  //flushing should write to the WAL
  region.flush(true);
  //so should compaction
  region.compact(false);
  //and so should closing the region
  region.close();

  //but we still shouldn't have triggered preWALAppend because no user data was written
  String[] methods = new String[] {"getCtPreWALAppend"};
  Object[] expectedResult = new Integer[]{0};
  verifyMethodResult(SimpleRegionObserver.class, methods, tableName, expectedResult);
}
 
Example 8
Source File: TestEncodedSeekers.java    From hbase with Apache License 2.0
private void doPuts(HRegion region) throws IOException {
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int i = 0; i < NUM_ROWS; ++i) {
    byte[] key = Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Put put = new Put(key);
      put.setDurability(Durability.ASYNC_WAL);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      if (includeTags) {
        Tag[] tag = new Tag[1];
        tag[0] = new ArrayBackedTag((byte) 1, "Visibility");
        KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
        put.add(kv);
      } else {
        put.addColumn(CF_BYTES, col, value);
      }
      if (VERBOSE) {
        KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
        System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
      }
      region.put(put);
    }
    if (i % NUM_ROWS_PER_FLUSH == 0) {
      region.flush(true);
    }
  }
}
 
Example 9
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
/**
 * Load region with rows from 'aaa' to 'zzz'.
 * @param r Region
 * @param f Family
 * @param flush flush the cache if true
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
    throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.addColumn(f, null, k);
        if (r.getWAL() == null) {
          put.setDurability(Durability.SKIP_WAL);
        }
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    if (flush) {
      r.flush(true);
    }
  }
  return rowCount;
}
 
Example 10
Source File: MutableIndexExtendedIT.java    From phoenix with Apache License 2.0
@Test(timeout = 120000)
public void testCompactNonPhoenixTable() throws Exception {
    if (localIndex || tableDDLOptions.contains("TRANSACTIONAL=true")) return;

    try (Connection conn = getConnection()) {
        // create a vanilla HBase table (non-Phoenix)
        String randomTable = generateUniqueName();
        TableName hbaseTN = TableName.valueOf(randomTable);
        byte[] famBytes = Bytes.toBytes("fam");
        Table hTable = getUtility().createTable(hbaseTN, famBytes);
        TestUtil.addCoprocessor(conn, randomTable, UngroupedAggregateRegionObserver.class);
        Put put = new Put(Bytes.toBytes("row"));
        byte[] value = new byte[1];
        Bytes.random(value);
        put.addColumn(famBytes, Bytes.toBytes("colQ"), value);
        hTable.put(put);

        // major compaction shouldn't cause a timeout or RS abort
        List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
        HRegion hRegion = regions.get(0);
        hRegion.flush(true);
        HStore store = hRegion.getStore(famBytes);
        store.triggerMajorCompaction();
        store.compactRecentForTestingAssumingDefaultPolicy(1);

        // we should be able to compact syscat itself as well
        regions =
                getUtility().getHBaseCluster().getRegions(
                        TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
        hRegion = regions.get(0);
        hRegion.flush(true);
        store = hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
        store.triggerMajorCompaction();
        store.compactRecentForTestingAssumingDefaultPolicy(1);
    }
}
 
Example 11
Source File: TestForceCacheImportantBlocks.java    From hbase with Apache License 2.0
private void writeTestData(HRegion region) throws IOException {
  for (int i = 0; i < NUM_ROWS; ++i) {
    Put put = new Put(Bytes.toBytes("row" + i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
        put.addColumn(CF_BYTES, Bytes.toBytes("col" + j), ts,
                Bytes.toBytes("value" + i + "_" + j + "_" + ts));
      }
    }
    region.put(put);
    if ((i + 1) % ROWS_PER_HFILE == 0) {
      region.flush(true);
    }
  }
}
 
Example 12
Source File: TestBlockEvictionFromClient.java    From hbase with Apache License 2.0
@Test
public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KV that will give you two blocks
    // Create a table with block size as 1024
    byte[][] fams = new byte[10][];
    fams[0] = FAMILY;
    for (int i = 1; i < 10; i++) {
      fams[i] = (Bytes.toBytes("testFamily" + i));
    }
    table = TEST_UTIL.createTable(tableName, fams, 1, 1024,
        CustomInnerRegionObserver.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region =
        TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    BlockCache cache = setCacheProperties(region);

    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    for (int i = 1; i < 10; i++) {
      put = new Put(ROW);
      put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
      table.put(put);
      if (i % 2 == 0) {
        region.flush(true);
      }
    }
    region.flush(true);
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // flush the data
    System.out.println("Flushing cache");
    // Should create one Hfile with 2 blocks
    CustomInnerRegionObserver.waitForGets.set(true);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, true, true);
    Thread.sleep(200);
    Iterator<CachedBlock> iterator = cache.iterator();
    boolean usedBlocksFound = false;
    int refCount = 0;
    int noOfBlocksWithRef = 0;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // blocks should have a ref count of NO_OF_THREADS (3)
        System.out.println("The refCount is " + refCount);
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
        noOfBlocksWithRef++;
      }
    }
    assertTrue(usedBlocksFound);
    // the number of blocks still referenced
    assertEquals(3, noOfBlocksWithRef);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that it had
    CustomInnerRegionObserver.waitForGets.set(true);
    // giving some time for the block to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
    System.out.println("Gets should have returned the bloks");
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
 
Example 13
Source File: TestBlockEvictionFromClient.java    From hbase with Apache License 2.0
@Test
public void testScanWithMultipleColumnFamilies() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KV that will give you two blocks
    // Create a table with block size as 1024
    byte[][] fams = new byte[10][];
    fams[0] = FAMILY;
    for (int i = 1; i < 10; i++) {
      fams[i] = (Bytes.toBytes("testFamily" + i));
    }
    table = TEST_UTIL.createTable(tableName, fams, 1, 1024,
        CustomInnerRegionObserver.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region =
        TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    BlockCache cache = setCacheProperties(region);

    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    for (int i = 1; i < 10; i++) {
      put = new Put(ROW);
      put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
      table.put(put);
      if (i % 2 == 0) {
        region.flush(true);
      }
    }
    region.flush(true);
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // flush the data
    System.out.println("Flushing cache");
    // Should create one Hfile with 2 blocks
    // Create three sets of gets
    ScanThread[] scanThreads = initiateScan(table, true);
    Thread.sleep(200);
    Iterator<CachedBlock> iterator = cache.iterator();
    boolean usedBlocksFound = false;
    int refCount = 0;
    int noOfBlocksWithRef = 0;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // blocks should have a ref count of NO_OF_THREADS (3)
        System.out.println("The refCount is " + refCount);
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
        noOfBlocksWithRef++;
      }
    }
    assertTrue(usedBlocksFound);
    // the number of blocks still referenced
    assertEquals(12, noOfBlocksWithRef);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (ScanThread thread : scanThreads) {
      thread.join();
    }
    // giving some time for the block to be decremented
    checkForBlockEviction(cache, true, false);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
 
Example 14
Source File: TestBlockEvictionFromClient.java    From hbase with Apache License 2.0
@Test
public void testGetWithCellsInDifferentFiles() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KV that will give you two blocks
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
        CustomInnerRegionObserver.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region =
        TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache().get();

    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // flush the data
    System.out.println("Flushing cache");
    // Should create one Hfile with 2 blocks
    CustomInnerRegionObserver.waitForGets.set(true);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, false, false);
    Thread.sleep(200);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that it had
    CustomInnerRegionObserver.waitForGets.set(true);
    // giving some time for the block to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
    System.out.println("Gets should have returned the bloks");
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
 
Example 15
Source File: TestAvoidCellReferencesIntoShippedBlocks.java    From hbase with Apache License 2.0
@Test
public void testHBase16372InCompactionWritePath() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Create a table with block size as 1024
  final Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
    CompactorRegionObserver.class.getName());
  try {
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region =
        (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    final BlockCache cache = cacheConf.getBlockCache().get();
    // insert data. 6 rows are added
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    // Load cache
    Scan s = new Scan();
    s.setMaxResultSize(1000);
    int count;
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
    // all the cache is loaded
    // trigger a major compaction
    ScannerThread scannerThread = new ScannerThread(table, cache);
    scannerThread.start();
    region.compact(true);
    s = new Scan();
    s.setMaxResultSize(1000);
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
  } finally {
    table.close();
  }
}
 
Example 16
Source File: TestFromClientSide5.java    From hbase with Apache License 2.0
@Test
public void testGetClosestRowBefore() throws IOException, InterruptedException {
  final TableName tableName = name.getTableName();
  final byte[] firstRow = Bytes.toBytes("row111");
  final byte[] secondRow = Bytes.toBytes("row222");
  final byte[] thirdRow = Bytes.toBytes("row333");
  final byte[] forthRow = Bytes.toBytes("row444");
  final byte[] beforeFirstRow = Bytes.toBytes("row");
  final byte[] beforeSecondRow = Bytes.toBytes("row22");
  final byte[] beforeThirdRow = Bytes.toBytes("row33");
  final byte[] beforeForthRow = Bytes.toBytes("row44");

  try (Table table =
      TEST_UTIL.createTable(tableName,
        new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024);
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {

    // set block size to 64 to make 2 kvs into one block, bypassing the walkForwardInSingleRow
    // in Store.rowAtOrBeforeFromStoreFile
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    Put put1 = new Put(firstRow);
    Put put2 = new Put(secondRow);
    Put put3 = new Put(thirdRow);
    Put put4 = new Put(forthRow);
    byte[] one = new byte[] { 1 };
    byte[] two = new byte[] { 2 };
    byte[] three = new byte[] { 3 };
    byte[] four = new byte[] { 4 };

    put1.addColumn(HConstants.CATALOG_FAMILY, null, one);
    put2.addColumn(HConstants.CATALOG_FAMILY, null, two);
    put3.addColumn(HConstants.CATALOG_FAMILY, null, three);
    put4.addColumn(HConstants.CATALOG_FAMILY, null, four);
    table.put(put1);
    table.put(put2);
    table.put(put3);
    table.put(put4);
    region.flush(true);

    Result result;

    // Test before first that null is returned
    result = getReverseScanResult(table, beforeFirstRow);
    assertNull(result);

    // Test at first that first is returned
    result = getReverseScanResult(table, firstRow);
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), firstRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one));

    // Test in between first and second that first is returned
    result = getReverseScanResult(table, beforeSecondRow);
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), firstRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one));

    // Test at second make sure second is returned
    result = getReverseScanResult(table, secondRow);
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), secondRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two));

    // Test in second and third, make sure second is returned
    result = getReverseScanResult(table, beforeThirdRow);
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), secondRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two));

    // Test at third make sure third is returned
    result = getReverseScanResult(table, thirdRow);
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), thirdRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three));

    // Test in third and forth, make sure third is returned
    result = getReverseScanResult(table, beforeForthRow);
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), thirdRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three));

    // Test at forth make sure forth is returned
    result = getReverseScanResult(table, forthRow);
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), forthRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four));

    // Test after forth make sure forth is returned
    result = getReverseScanResult(table, Bytes.add(forthRow, one));
    assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null));
    assertTrue(Bytes.equals(result.getRow(), forthRow));
    assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four));
  }
}
 
Example 17
Source File: TestFilter.java    From hbase with Apache License 2.0
@Test
public void testFilterListWithPrefixFilter() throws IOException {
  byte[] family = Bytes.toBytes("f1");
  byte[] qualifier = Bytes.toBytes("q1");
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(
      TableName.valueOf(name.getMethodName()));

  tableDescriptor.setColumnFamily(
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
  RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
  HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), tableDescriptor);

  for(int i=0; i<5; i++) {
    Put p = new Put(Bytes.toBytes((char)('a'+i) + "row"));
    p.setDurability(Durability.SKIP_WAL);
    p.addColumn(family, qualifier, Bytes.toBytes(String.valueOf(111 + i)));
    testRegion.put(p);
  }
  testRegion.flush(true);

  // rows starting with "b"
  PrefixFilter pf = new PrefixFilter(new byte[] { 'b' });
  // rows with value of column 'q1' set to '113'
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(
      family, qualifier, CompareOperator.EQUAL, Bytes.toBytes("113"));
  // combine these two with OR in a FilterList
  FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, pf, scvf);

  Scan s1 = new Scan();
  s1.setFilter(filterList);
  InternalScanner scanner = testRegion.getScanner(s1);
  List<Cell> results = new ArrayList<>();
  int resultCount = 0;
  while (scanner.next(results)) {
    resultCount++;
    byte[] row = CellUtil.cloneRow(results.get(0));
    LOG.debug("Found row: " + Bytes.toStringBinary(row));
    assertTrue(Bytes.equals(row, Bytes.toBytes("brow"))
        || Bytes.equals(row, Bytes.toBytes("crow")));
    results.clear();
  }
  assertEquals(2, resultCount);
  scanner.close();

  WAL wal = testRegion.getWAL();
  testRegion.close();
  wal.close();
}
 
Example 18
Source File: TestCoprocessorInterface.java    From hbase with Apache License 2.0
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initConfig();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[]{}, families);

  for (int i = 0; i < 3; i++) {
    HTestConst.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);

  Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // two coprocessors get different sharedDatas
  assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // make sure that all coprocessor of a class have identical sharedDatas
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);

  // now have all Environments fail
  try {
    byte [] r = region.getRegionInfo().getStartKey();
    if (r == null || r.length <= 0) {
      // It's the start row. Can't ask for null. Ask for the minimal key instead.
      r = new byte [] {0};
    }
    Get g = new Get(r);
    region.get(g);
    fail();
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
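    // expected: the failing coprocessor throws here, and its environment is removed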
  }
  assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class));
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // new map and object created, hence the reference is different
  // hence the old entry was indeed removed by the GC and new one has been created
  Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 19
Source File: TestFlushWithThroughputController.java    From hbase with Apache License 2.0
/**
 * Test the tuning task of {@link PressureAwareFlushThroughputController}
 */
@Test
public void testFlushThroughputTuning() throws Exception {
  Configuration conf = hbtu.getConfiguration();
  setMaxMinThroughputs(20L * 1024 * 1024, 10L * 1024 * 1024);
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD,
    3000);
  hbtu.startMiniCluster(1);
  Connection conn = ConnectionFactory.createConnection(conf);
  hbtu.getAdmin().createTable(TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false)
    .build());
  hbtu.waitTableAvailable(tableName);
  HRegionServer regionServer = hbtu.getRSForFirstRegionInTable(tableName);
  double pressure = regionServer.getFlushPressure();
  LOG.debug("Flush pressure before flushing: " + pressure);
  PressureAwareFlushThroughputController throughputController =
      (PressureAwareFlushThroughputController) regionServer.getFlushThroughputController();
  for (HRegion region : regionServer.getRegions()) {
    region.flush(true);
  }
  // We used to assert that the flush pressure is zero, but after HBASE-15787 or HBASE-18294 we
  // changed to use heapSize instead of dataSize to calculate the flush pressure. Since heapSize
  // will never be zero, flush pressure will never be zero either, so the assertion here was
  // changed accordingly.
  assertTrue(regionServer.getFlushPressure() < pressure);
  Thread.sleep(5000);
  boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(hbtu.getConfiguration());
  if (tablesOnMaster) {
    // If no tables on the master, this math is off and I'm not sure what it is supposed to be
    // when meta is on the regionserver and not on the master.
    assertEquals(10L * 1024 * 1024, throughputController.getMaxThroughput(), EPSILON);
  }
  Table table = conn.getTable(tableName);
  Random rand = new Random();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[256 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
  }
  Thread.sleep(5000);
  double expectedThroughPut = 10L * 1024 * 1024 * (1 + regionServer.getFlushPressure());
  assertEquals(expectedThroughPut, throughputController.getMaxThroughput(), EPSILON);

  conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
    NoLimitThroughputController.class.getName());
  regionServer.onConfigurationChange(conf);
  assertTrue(throughputController.isStopped());
  assertTrue(regionServer.getFlushThroughputController() instanceof NoLimitThroughputController);
  conn.close();
}
 
Example 20
Source File: AbstractTestWALReplay.java    From hbase with Apache License 2.0
/**
 * Tests that WAL edits are replayed correctly after a region with multiple column families
 * is moved between region servers and the original server aborts.
 */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
  final TableName tableName =
      TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
  byte[] family1 = Bytes.toBytes("cf1");
  byte[] family2 = Bytes.toBytes("cf2");
  byte[] qualifier = Bytes.toBytes("q");
  byte[] value = Bytes.toBytes("testV");
  byte[][] familys = { family1, family2 };
  TEST_UTIL.createTable(tableName, familys);
  Table htable = TEST_UTIL.getConnection().getTable(tableName);
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(family1, qualifier, value);
  htable.put(put);
  ResultScanner resultScanner = htable.getScanner(new Scan());
  int count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(1, count);

  MiniHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
  List<HRegion> regions = hbaseCluster.getRegions(tableName);
  assertEquals(1, regions.size());

  // move region to another regionserver
  Region destRegion = regions.get(0);
  int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
  assertTrue("Please start more than 1 regionserver",
      hbaseCluster.getRegionServerThreads().size() > 1);
  int destServerNum = 0;
  while (destServerNum == originServerNum) {
    destServerNum++;
  }
  HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
  HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
  // move region to destination regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());

  // delete the row
  Delete del = new Delete(Bytes.toBytes("r1"));
  htable.delete(del);
  resultScanner = htable.getScanner(new Scan());
  count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(0, count);

  // flush region and make major compaction
  HRegion region =
      (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
  region.flush(true);
  // wait to complete major compaction
  for (HStore store : region.getStores()) {
    store.triggerMajorCompaction();
  }
  region.compact(true);

  // move region to origin regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
  // abort the origin regionserver
  originServer.abort("testing");

  // see what we get
  Result result = htable.get(new Get(Bytes.toBytes("r1")));
  assertTrue("Row is deleted, but we got " + result,
      result == null || result.isEmpty());
  resultScanner.close();
}