Java Code Examples for org.apache.hadoop.hbase.regionserver.HRegion#get()

The following examples show how to use org.apache.hadoop.hbase.regionserver.HRegion#get(). They are drawn from open source projects; the source file, project, and license are noted above each example.
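All of the examples share one basic pattern: HRegion#get(Get) executes a Get directly against an opened region and returns a Result, much as Table#get(Get) does through a client connection, but without the RPC layer. That makes it handy in server-side tests of region internals such as WAL replay, coprocessors, and block caching. The sketch below shows the pattern in isolation; the table name, family, and values are illustrative and not taken from any of the examples that follow.

// Minimal sketch of the HRegion#get() pattern, assuming an HBaseTestingUtility
// instance named TEST_UTIL; all names and values here are made up.
byte[] family = Bytes.toBytes("f");
byte[] row = Bytes.toBytes("row1");
byte[] qualifier = Bytes.toBytes("q");
byte[] value = Bytes.toBytes("v");

HRegion region = TEST_UTIL.createTestRegion("sketchTable",
    ColumnFamilyDescriptorBuilder.of(family));
try {
  // Write a cell directly into the region ...
  Put put = new Put(row);
  put.addColumn(family, qualifier, value);
  region.put(put);

  // ... and read it back with HRegion#get().
  Result result = region.get(new Get(row));
  assertFalse(result.isEmpty());
  assertTrue(Bytes.equals(value, result.getValue(family, qualifier)));
} finally {
  // Close the region together with its WAL, as several examples below do.
  HBaseTestingUtility.closeRegionAndWAL(region);
}
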
Example 1
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row: " + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Result result = region.get(new Get(data));

    boolean hasResult = result != null && !result.isEmpty();
    assertEquals(failMsg + result, present, hasResult);
    if (!present) continue;

    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
      Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()));
  }
}
 
Example 2
Source File: TestSplitTableRegionProcedure.java    From hbase with Apache License 2.0
private void verifyData(
    final HRegion newReg,
    final int startRow,
    final int numRows,
    final byte[]... families)
    throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    byte[] row = Bytes.toBytes("" + i);
    Get get = new Get(row);
    Result result = newReg.get(get);
    Cell[] raw = result.rawCells();
    assertEquals(families.length, result.size());
    for (int j = 0; j < families.length; j++) {
      assertTrue(CellUtil.matchingRows(raw[j], row));
      assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
    }
  }
}
 
Example 3
Source File: TestRegionObserverScannerOpenHook.java    From hbase with Apache License 2.0
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  region.put(put);

  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
        + r, r.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 4
Source File: TestRegionObserverScannerOpenHook.java    From hbase with Apache License 2.0
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
        + r, r.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 5
Source File: TestForceCacheImportantBlocks.java    From hbase with Apache License 2.0
@Test
public void testCacheBlocks() throws IOException {
  // Set index block size to be the same as normal block size.
  TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, BLOCK_SIZE);
  BlockCache blockCache = BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration());
  ColumnFamilyDescriptor cfd =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CF)).setMaxVersions(MAX_VERSIONS)
          .setCompressionType(COMPRESSION_ALGORITHM).setBloomFilterType(BLOOM_TYPE)
          .setBlocksize(BLOCK_SIZE).setBlockCacheEnabled(cfCacheEnabled).build();
  HRegion region = TEST_UTIL.createTestRegion(TABLE, cfd, blockCache);
  CacheStats stats = blockCache.getStats();
  writeTestData(region);
  assertEquals(0, stats.getHitCount());
  assertEquals(0, HFile.DATABLOCK_READ_COUNT.sum());
  // Do a single get and record the cache counts. If we are NOT caching DATA blocks, the miss
  // count should go up. Otherwise, all blocks should be cached and the miss count should not rise.
  region.get(new Get(Bytes.toBytes("row" + 0)));
  assertTrue(stats.getHitCount() > 0);
  assertTrue(HFile.DATABLOCK_READ_COUNT.sum() > 0);
  long missCount = stats.getMissCount();
  region.get(new Get(Bytes.toBytes("row" + 0)));
  if (this.cfCacheEnabled) assertEquals(missCount, stats.getMissCount());
  else assertTrue(stats.getMissCount() > missCount);
}
 
Example 6
Source File: TestWALSplitToHFile.java    From hbase with Apache License 2.0
@Test
public void testDifferentRootDirAndWALRootDir() throws Exception {
  // Change wal root dir and reset the configuration
  Path walRootDir = UTIL.createWALRootDir();
  this.conf = HBaseConfiguration.create(UTIL.getConfiguration());

  FileSystem walFs = CommonFSUtils.getWALFileSystem(this.conf);
  this.oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  String serverName =
      ServerName.valueOf(TEST_NAME.getMethodName() + "-manual", 16010, System.currentTimeMillis())
          .toString();
  this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
  this.logDir = new Path(walRootDir, logName);
  this.wals = new WALFactory(conf, TEST_NAME.getMethodName());

  Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
  TableDescriptor td = pair.getFirst();
  RegionInfo ri = pair.getSecond();

  WAL wal = createWAL(walFs, walRootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
  writeData(td, region);

  // Now close the region without flush
  region.close(true);
  wal.shutdown();
  // split the log
  WALSplitter.split(walRootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);

  WAL wal2 = createWAL(walFs, walRootDir, logName);
  HRegion region2 = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal2);
  Result result2 = region2.get(new Get(ROW));
  assertEquals(td.getColumnFamilies().length, result2.size());
  for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
    assertTrue(Bytes.equals(VALUE1, result2.getValue(cfd.getName(), QUALIFIER)));
  }
}
 
Example 7
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java    From phoenix with Apache License 2.0
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
    final String tableNameStr = "testReplayEditsWrittenViaHRegion";
    final HRegionInfo hri = new HRegionInfo(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr), 
        null, null, false);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
    
    // set up basic indexing for the table
    // enable indexing to a non-existent index table
    byte[] family = new byte[] { 'a' };
    ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
    builder.addIndexGroup(fam1);
    builder.build(htd);

    // create the region + its WAL
    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    region0.close();
    region0.getWAL().close();

    WALFactory walFactory = new WALFactory(this.conf, null, "localhost,1234");

    WAL wal = createWAL(this.conf, walFactory);
    RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
    // mock out some of the internals of the RSS, so we can run CPs
    Mockito.when(mockRS.getWAL(null)).thenReturn(wal);
    RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
    Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
    ServerName mockServerName = Mockito.mock(ServerName.class);
    Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
    Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
    HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
    region.initialize();
    region.getSequenceId().set(0);

    //make an attempted write to the primary that should also be indexed
    byte[] rowkey = Bytes.toBytes("indexed_row_key");
    Put p = new Put(rowkey);
    p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
    region.put(p);

    // we should then see the server go down
    Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
      Mockito.any(Exception.class));

    // then create the index table so we are successful on WAL replay
    CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

    // run the WAL split and setup the region
    runWALSplit(this.conf, walFactory);
    WAL wal2 = createWAL(this.conf, walFactory);
    HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

    // initialize the region - this should replay the WALEdits from the WAL
    region1.initialize();

    // now check to ensure that we wrote to the index table
    HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
    int indexSize = getKeyValueCount(index);
    assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
    Get g = new Get(rowkey);
    final Result result = region1.get(g);
    assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

    // cleanup the index table
    HBaseAdmin admin = UTIL.getHBaseAdmin();
    admin.disableTable(INDEX_TABLE_NAME);
    admin.deleteTable(INDEX_TABLE_NAME);
    admin.close();
}
 
Example 8
Source File: TestWALSplitToHFile.java    From hbase with Apache License 2.0
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 */
@Test
public void testWrittenViaHRegion()
    throws IOException, SecurityException, IllegalArgumentException, InterruptedException {
  Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
  TableDescriptor td = pair.getFirst();
  RegionInfo ri = pair.getSecond();

  // Write countPerFamily edits into the three families. Flush one of the
  // families during the load of edits so its seqid is not the same as the
  // others', to test that we do the right thing when seqids differ.
  WAL wal = createWAL(this.conf, rootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
  long seqid = region.getOpenSeqNum();
  boolean first = true;
  for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
    addRegionEdits(ROW, cfd.getName(), countPerFamily, this.ee, region, "x");
    if (first) {
      // If first, flush so we have at least one family w/ a different seqid from the rest.
      region.flush(true);
      first = false;
    }
  }
  // Now assert edits made it in.
  final Get g = new Get(ROW);
  Result result = region.get(g);
  assertEquals(countPerFamily * td.getColumnFamilies().length, result.size());
  // Now close the region (without flush), split the log, reopen the region and assert that
  // replay of log has the correct effect, that our seqids are calculated correctly so
  // all edits in logs are seen as 'stale'/old.
  region.close(true);
  wal.shutdown();
  try {
    WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
  } catch (Exception e) {
    LOG.debug("Got exception", e);
  }

  WAL wal2 = createWAL(this.conf, rootDir, logName);
  HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
  long seqid2 = region2.getOpenSeqNum();
  assertTrue(seqid + result.size() < seqid2);
  final Result result1b = region2.get(g);
  assertEquals(result.size(), result1b.size());

  // Next test. Add more edits, then 'crash' this region by stealing its wal
  // out from under it, and assert that replay of the log adds the edits back
  // correctly when the region is opened again.
  for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
    addRegionEdits(ROW, hcd.getName(), countPerFamily, this.ee, region2, "y");
  }
  // Get count of edits.
  final Result result2 = region2.get(g);
  assertEquals(2 * result.size(), result2.size());
  wal2.sync();
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  User user = HBaseTestingUtility.getDifferentUser(newConf, td.getTableName().getNameAsString());
  user.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(conf), conf, wals);
      FileSystem newFS = FileSystem.get(newConf);
      // Make a new wal for new region open.
      WAL wal3 = createWAL(newConf, rootDir, logName);
      Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName());
      HRegion region3 = new HRegion(tableDir, wal3, newFS, newConf, ri, td, null);
      long seqid3 = region3.initialize();
      Result result3 = region3.get(g);
      // Assert that count of cells is same as before crash.
      assertEquals(result2.size(), result3.size());

      // I can't close wal1; it's been appropriated when we split.
      region3.close();
      wal3.close();
      return null;
    }
  });
}
 
Example 9
Source File: TestCoprocessorInterface.java    From hbase with Apache License 2.0
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initConfig();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[]{}, families);

  for (int i = 0; i < 3; i++) {
    HTestConst.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);

  Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // the two coprocessors get different sharedDatas
  assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // make sure that all coprocessors of a class have identical sharedDatas
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);

  // now have all Environments fail
  try {
    byte [] r = region.getRegionInfo().getStartKey();
    if (r == null || r.length <= 0) {
      // It's the start row. Can't ask for null. Ask for the minimal key instead.
      r = new byte [] {0};
    }
    Get g = new Get(r);
    region.get(g);
    fail();
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
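    // expected: CoprocessorII throws on get(), and its environment is removed
    // (verified by the assertNull below)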
  }
  assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class));
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // a new map and object were created, hence the reference is different;
  // the old entry was indeed removed by the GC and a new one has been created
  Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 10
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java    From phoenix with Apache License 2.0
/**
 * Test writing edits into a region, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
    final String tableNameStr = "testReplayEditsWrittenViaHRegion";
    final RegionInfo hri = RegionInfoBuilder.newBuilder(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr)).setSplit(false).build();
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
    deleteDir(basedir);
    final TableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
    
    //setup basic indexing for the table
    // enable indexing to a non-existant index table
    byte[] family = new byte[] { 'a' };
    ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
    builder.addIndexGroup(fam1);
    builder.build(htd);
    WALFactory walFactory = new WALFactory(this.conf, "localhost,1234");

    WAL wal = createWAL(this.conf, walFactory);
    // create the region + its WAL
    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal); // FIXME: Uses private type
    region0.close();
    region0.getWAL().close();

    HRegionServer mockRS = Mockito.mock(HRegionServer.class);
    // mock out some of the internals of the RSS, so we can run CPs
    when(mockRS.getWAL(null)).thenReturn(wal);
    RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
    when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
    ServerName mockServerName = Mockito.mock(ServerName.class);
    when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
    when(mockRS.getServerName()).thenReturn(mockServerName);
    HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
    region.initialize();

    //make an attempted write to the primary that should also be indexed
    byte[] rowkey = Bytes.toBytes("indexed_row_key");
    Put p = new Put(rowkey);
    p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
    region.put(p);

    // we should then see the server go down
    Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
      Mockito.any(Exception.class));

    // then create the index table so we are successful on WAL replay
    TestIndexManagementUtil.createIndexTable(UTIL.getAdmin(), INDEX_TABLE_NAME);

    // run the WAL split and setup the region
    runWALSplit(this.conf, walFactory);
    WAL wal2 = createWAL(this.conf, walFactory);
    HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

    // initialize the region - this should replay the WALEdits from the WAL
    region1.initialize();
    org.apache.hadoop.hbase.client.Connection hbaseConn =
            ConnectionFactory.createConnection(UTIL.getConfiguration());

    // now check to ensure that we wrote to the index table
    Table index = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(INDEX_TABLE_NAME));
    int indexSize = getKeyValueCount(index);
    assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
    Get g = new Get(rowkey);
    final Result result = region1.get(g);
    assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

    // cleanup the index table
    Admin admin = UTIL.getAdmin();
    admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
    admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
    admin.close();
}
 
Example 11
Source File: TestWALReplayWithIndexWritesAndCompressedWAL.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableNameStr), null, null, false);
  final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
  
  // set up basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getLog().closeAndDelete();
  HLog wal = createWAL(this.conf);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL()).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + "-server-1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  long seqid = region.initialize();
  // HRegionServer usually does this. It knows the largest seqid across all regions.
  wal.setSequenceNumber(seqid);
  
  //make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(new Put[] { p });

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
    Mockito.any(Exception.class));
  region.close(true);
  wal.close();

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf);
  HLog wal2 = createWAL(this.conf);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
 
Example 12
Source File: SynchronousReadResolverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void testResolveRolledBackWorks() throws Exception {
    HRegion region = MockRegionUtils.getMockRegion();
    RegionPartition rp = new RegionPartition(region);
    TrafficControl control = GreenLight.INSTANCE;

    final TxnStore store = new TestingTxnStore(new IncrementingClock(),new TestingTimestampSource(),HExceptionFactory.INSTANCE,Long.MAX_VALUE);
    ReadResolver resolver = SynchronousReadResolver.getResolver(rp, store, new RollForwardStatus(), control, false);
    TxnLifecycleManager tc = mock(TxnLifecycleManager.class);
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
            store.rollback((Long) invocationOnMock.getArguments()[0]);
            return null;
        }
    }).when(tc).rollback(0x100l);

    Txn rolledBackTxn = new WritableTxn(0x100l, 0x100l, null, Txn.IsolationLevel.SNAPSHOT_ISOLATION, Txn.ROOT_TRANSACTION, tc, false,HExceptionFactory.INSTANCE);
    store.recordNewTransaction(rolledBackTxn);
    rolledBackTxn.rollback(); //ensure that it's rolled back

    byte[] rowKey = Encoding.encode("hello");
    Put testPut = new Put(rowKey);
    testPut.addColumn(SIConstants.DEFAULT_FAMILY_BYTES,
            SIConstants.PACKED_COLUMN_BYTES,
            rolledBackTxn.getTxnId(), Encoding.encode("hello2"));

    region.put(testPut);

    Txn readTxn = ReadOnlyTxn.createReadOnlyTransaction(0x200l, Txn.ROOT_TRANSACTION, 0x200l,
            Txn.IsolationLevel.SNAPSHOT_ISOLATION, false, mock(TxnLifecycleManager.class),HExceptionFactory.INSTANCE);
    SimpleTxnFilter filter = new SimpleTxnFilter(null, readTxn,resolver,store);

    Result result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    Cell kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    DataFilter.ReturnCode returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.SKIP, returnCode);

    //check to see if the resolver added the proper key value
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size after read resolve!", 0, result.size());
}
 
Example 13
Source File: SynchronousReadResolverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void testResolvingCommittedWorks() throws Exception {
    HRegion region = MockRegionUtils.getMockRegion();
    RegionPartition rp = new RegionPartition(region);

    final TestingTimestampSource commitTsGenerator = new TestingTimestampSource();
    final TxnStore store = new TestingTxnStore(new IncrementingClock(),commitTsGenerator,HExceptionFactory.INSTANCE,Long.MAX_VALUE);
    ReadResolver resolver = SynchronousReadResolver.getResolver(rp,store,new RollForwardStatus(),GreenLight.INSTANCE,false);
    TxnLifecycleManager tc = mock(TxnLifecycleManager.class);
    doAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocationOnMock) throws Throwable {
            long next = commitTsGenerator.nextTimestamp();
            store.commit((Long) invocationOnMock.getArguments()[0]);
            return next + SIConstants.TRASANCTION_INCREMENT;
        }
    }).when(tc).commit(anyLong());
    Txn committedTxn = new WritableTxn(0x100l, 0x100l, null, Txn.IsolationLevel.SNAPSHOT_ISOLATION, Txn.ROOT_TRANSACTION, tc, false,HExceptionFactory.INSTANCE);
    store.recordNewTransaction(committedTxn);
    committedTxn.commit();

    byte[] rowKey = Encoding.encode("hello");
    Put testPut = new Put(rowKey);
    testPut.addColumn(SIConstants.DEFAULT_FAMILY_BYTES,
            SIConstants.PACKED_COLUMN_BYTES,
            committedTxn.getTxnId(), Encoding.encode("hello2"));

    region.put(testPut);

    Txn readTxn = ReadOnlyTxn.createReadOnlyTransaction(0x300l, Txn.ROOT_TRANSACTION, 0x300l,
            Txn.IsolationLevel.SNAPSHOT_ISOLATION, false, mock(TxnLifecycleManager.class),HExceptionFactory.INSTANCE);
    SimpleTxnFilter filter = new SimpleTxnFilter(null, readTxn,resolver,store);

    Result result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    Cell kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    DataFilter.ReturnCode returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.INCLUDE, returnCode);

    //check to see if the resolver added the proper key value
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size after read resolve!", 2, result.size());
    Cell commitTs = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.COMMIT_TIMESTAMP_COLUMN_BYTES);
    Assert.assertNotNull("No Commit TS column found!", commitTs);
    Assert.assertEquals("Incorrect committed txnId", committedTxn.getTxnId(), commitTs.getTimestamp());
    Assert.assertEquals("Incorrect commit timestamp!", committedTxn.getEffectiveCommitTimestamp(), Bytes.toLong(CellUtil.cloneValue(commitTs)));
}
 
Example 14
Source File: SynchronousReadResolverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void testResolvingCommittedDoesNotHappenUntilParentCommits() throws Exception {
    HRegion region = MockRegionUtils.getMockRegion();
    RegionPartition rp = new RegionPartition(region);

    TestingTimestampSource timestampSource = new TestingTimestampSource();
    TxnStore store = new TestingTxnStore(new IncrementingClock(),timestampSource,HExceptionFactory.INSTANCE,Long.MAX_VALUE);
    ReadResolver resolver = SynchronousReadResolver.getResolver(rp, store, new RollForwardStatus(), GreenLight.INSTANCE, false);

    ClientTxnLifecycleManager tc = new ClientTxnLifecycleManager(timestampSource,HExceptionFactory.INSTANCE);
    tc.setTxnStore(store);
    tc.setKeepAliveScheduler(new ManualKeepAliveScheduler(store));
    Txn parentTxn = tc.beginTransaction(Bytes.toBytes("1184"));

    Txn childTxn = tc.beginChildTransaction(parentTxn, Txn.IsolationLevel.SNAPSHOT_ISOLATION, false, Bytes.toBytes("1184"));

    byte[] rowKey = Encoding.encode("hello");
    Put testPut = new Put(rowKey);
    testPut.addColumn(SIConstants.DEFAULT_FAMILY_BYTES,
            SIConstants.PACKED_COLUMN_BYTES,
            childTxn.getTxnId(), Encoding.encode("hello2"));

    region.put(testPut);

    childTxn.commit();

    Txn readTxn = tc.beginTransaction(); //a read-only transaction with SI semantics
    SimpleTxnFilter filter = new SimpleTxnFilter(null, readTxn,resolver,store);

    Result result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    Cell kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    DataFilter.ReturnCode returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.SKIP, returnCode);

    //make sure the resolver has not added anything
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size after read resolve!", 1, result.size());

    //commit the parent and see if resolution works then
    parentTxn.commit();

    //now re-read the data and make sure that it resolves
    filter.nextRow();
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.SKIP, returnCode);

    //make sure that the read-resolver worked
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 2, result.size());
    kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);
    Cell commitTs = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.COMMIT_TIMESTAMP_COLUMN_BYTES);
    Assert.assertNotNull("No Commit TS column found!", commitTs);
    Assert.assertEquals("Incorrect committed txnId", childTxn.getTxnId(), commitTs.getTimestamp());
    Assert.assertEquals("Incorrect commit timestamp!", childTxn.getEffectiveCommitTimestamp(), Bytes.toLong(CellUtil.cloneValue(commitTs)));
}