Java Code Examples for org.apache.hadoop.hbase.regionserver.HRegion#put()

The following examples show how to use org.apache.hadoop.hbase.regionserver.HRegion#put(). They are drawn from several open source projects; the source file, project, and license are noted above each example.
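Before the project examples, here is a minimal sketch of the pattern they all share: build a Put, hand it to HRegion#put(), then optionally read the row back. This is not taken from any project below; it assumes an already-opened HRegion named region whose table defines the column family used, and the row, family, qualifier, and value names are illustrative.

// Assumed: 'region' is an open org.apache.hadoop.hbase.regionserver.HRegion
// whose table declares column family 'f'; all names here are illustrative.
byte[] row = Bytes.toBytes("row1");
byte[] family = Bytes.toBytes("f");
byte[] qualifier = Bytes.toBytes("q");

Put put = new Put(row);
put.addColumn(family, qualifier, Bytes.toBytes("value"));
region.put(put); // enters the memstore (and the WAL, subject to the Put's durability)

Result result = region.get(new Get(row));
byte[] value = result.getValue(family, qualifier); // == Bytes.toBytes("value")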
Example 1
Source File: TestRegionObserverScannerOpenHook.java    From hbase with Apache License 2.0
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
        + r, r.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 2
Source File: TestForceCacheImportantBlocks.java    From hbase with Apache License 2.0
private void writeTestData(HRegion region) throws IOException {
  for (int i = 0; i < NUM_ROWS; ++i) {
    Put put = new Put(Bytes.toBytes("row" + i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
        put.addColumn(CF_BYTES, Bytes.toBytes("col" + j), ts,
                Bytes.toBytes("value" + i + "_" + j + "_" + ts));
      }
    }
    region.put(put);
    if ((i + 1) % ROWS_PER_HFILE == 0) {
      region.flush(true);
    }
  }
}
 
Example 3
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
/**
 * Load region with rows from 'aaa' to 'zzz'.
 * @param r Region
 * @param f Family
 * @param flush flush the cache if true
 * @return Count of rows loaded.
 * @throws IOException if a write or flush fails
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.addColumn(f, null, k);
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
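            // the region is temporarily rejecting writes (e.g. memstore over its
            // blocking threshold); back off exponentially, capped at maxPause, then retry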
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    if (flush) {
      r.flush(true);
    }
  }
  return rowCount;
}
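For reference, a hypothetical call site for this helper (the testing-utility and region instances are assumed to already exist; the family name is illustrative):

// loads 26 * 26 * 26 = 17,576 rows keyed 'aaa'..'zzz'; with flush=true the
// region is flushed after each 676-row stripe sharing the same first byte
int rows = testingUtility.loadRegion(region, Bytes.toBytes("f"), true);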
 
Example 4
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.addColumn(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.addColumns(familyBytes, columnBytes);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false, new FlushLifeCycleTracker() { });

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
 
Example 5
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.add(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.deleteColumns(familyBytes, columnBytes);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    region.flushcache(true, false);

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
        new long[]{V[8], V[6], deleteTs},
        new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
 
Example 6
Source File: TestIntraRowPagination.java    From hbase with Apache License 2.0
/**
 * Test from the client side for a scan with rowOffsetPerColumnFamily and maxResultsPerColumnFamily set
 */
@Test
public void testScanLimitAndOffset() throws Exception {
  byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2);
  byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3);
  byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10);

  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(
      TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES));

  RegionInfo info = RegionInfoBuilder.newBuilder(HTestConst.DEFAULT_TABLE).build();
  for (byte[] family : FAMILIES) {
    ColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family);
    tableDescriptor.setColumnFamily(familyDescriptor);
  }
  HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), tableDescriptor);
  try {
    Put put;
    Scan scan;
    Result result;
    boolean toLog = true;

    List<Cell> kvListExp = new ArrayList<>();

    int storeOffset = 1;
    int storeLimit = 3;
    for (int r = 0; r < ROWS.length; r++) {
      put = new Put(ROWS[r]);
      for (int c = 0; c < FAMILIES.length; c++) {
        for (int q = 0; q < QUALIFIERS.length; q++) {
          KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1,
              HTestConst.DEFAULT_VALUE_BYTES);
          put.add(kv);
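          // the offset/limit scan below should return only qualifiers in
          // [storeOffset, storeOffset + storeLimit) for each family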
          if (storeOffset <= q && q < storeOffset + storeLimit) {
            kvListExp.add(kv);
          }
        }
      }
      region.put(put);
    }

    scan = new Scan();
    scan.setRowOffsetPerColumnFamily(storeOffset);
    scan.setMaxResultsPerColumnFamily(storeLimit);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> kvListScan = new ArrayList<>();
    List<Cell> results = new ArrayList<>();
    while (scanner.next(results) || !results.isEmpty()) {
      kvListScan.addAll(results);
      results.clear();
    }
    result = Result.create(kvListScan);
    TestScannersFromClientSide.verifyResult(result, kvListExp, toLog,
        "Testing scan with storeOffset and storeLimit");
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
 
Example 7
Source File: TestFilter.java    From hbase with Apache License 2.0
@Test
public void testFilterListWithPrefixFilter() throws IOException {
  byte[] family = Bytes.toBytes("f1");
  byte[] qualifier = Bytes.toBytes("q1");
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(
      TableName.valueOf(name.getMethodName()));

  tableDescriptor.setColumnFamily(
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
  RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
  HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), tableDescriptor);

  for(int i=0; i<5; i++) {
    Put p = new Put(Bytes.toBytes((char)('a'+i) + "row"));
    p.setDurability(Durability.SKIP_WAL);
    p.addColumn(family, qualifier, Bytes.toBytes(String.valueOf(111 + i)));
    testRegion.put(p);
  }
  testRegion.flush(true);

  // rows starting with "b"
  PrefixFilter pf = new PrefixFilter(new byte[] {'b'});
  // rows with value of column 'q1' set to '113'
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(
      family, qualifier, CompareOperator.EQUAL, Bytes.toBytes("113"));
  // combine these two with OR in a FilterList
  FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, pf, scvf);

  Scan s1 = new Scan();
  s1.setFilter(filterList);
  InternalScanner scanner = testRegion.getScanner(s1);
  List<Cell> results = new ArrayList<>();
  int resultCount = 0;
  while (scanner.next(results)) {
    resultCount++;
    byte[] row = CellUtil.cloneRow(results.get(0));
    LOG.debug("Found row: " + Bytes.toStringBinary(row));
    assertTrue(Bytes.equals(row, Bytes.toBytes("brow"))
        || Bytes.equals(row, Bytes.toBytes("crow")));
    results.clear();
  }
  assertEquals(2, resultCount);
  scanner.close();

  WAL wal = testRegion.getWAL();
  testRegion.close();
  wal.close();
}
 
Example 8
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.add(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.deleteColumns(familyBytes, columnBytes);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
                          new long[]{V[8], V[6], deleteTs},
                          new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
 
Example 9
Source File: SynchronousReadResolverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void testResolvingCommittedDoesNotHappenUntilParentCommits() throws Exception {
    HRegion region = MockRegionUtils.getMockRegion();
    RegionPartition rp = new RegionPartition(region);

    TestingTimestampSource timestampSource = new TestingTimestampSource();
    TxnStore store = new TestingTxnStore(new IncrementingClock(),timestampSource,HExceptionFactory.INSTANCE,Long.MAX_VALUE);
    ReadResolver resolver = SynchronousReadResolver.getResolver(rp, store, new RollForwardStatus(), GreenLight.INSTANCE, false);

    ClientTxnLifecycleManager tc = new ClientTxnLifecycleManager(timestampSource,HExceptionFactory.INSTANCE);
    tc.setTxnStore(store);
    tc.setKeepAliveScheduler(new ManualKeepAliveScheduler(store));
    Txn parentTxn = tc.beginTransaction(Bytes.toBytes("1184"));

    Txn childTxn = tc.beginChildTransaction(parentTxn, Txn.IsolationLevel.SNAPSHOT_ISOLATION, false, Bytes.toBytes("1184"));

    byte[] rowKey = Encoding.encode("hello");
    Put testPut = new Put(rowKey);
    testPut.addColumn(SIConstants.DEFAULT_FAMILY_BYTES,
            SIConstants.PACKED_COLUMN_BYTES,
            childTxn.getTxnId(), Encoding.encode("hello2"));

    region.put(testPut);

    childTxn.commit();

    Txn readTxn = tc.beginTransaction(); //a read-only transaction with SI semantics
    SimpleTxnFilter filter = new SimpleTxnFilter(null, readTxn,resolver,store);

    Result result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    Cell kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    DataFilter.ReturnCode returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.SKIP, returnCode);

    //make sure the resolver has not added anything
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size after read resolve!", 1, result.size());

    //commit the parent and see if resolution works then
    parentTxn.commit();

    //now re-read the data and make sure that it resolves
    filter.nextRow();
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.SKIP, returnCode);

    //make sure that the read-resolver worked
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 2, result.size());
    kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);
    Cell commitTs = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.COMMIT_TIMESTAMP_COLUMN_BYTES);
    Assert.assertNotNull("No Commit TS column found!", commitTs);
    Assert.assertEquals("Incorrect committed txnId", childTxn.getTxnId(), commitTs.getTimestamp());
    Assert.assertEquals("Incorrect commit timestamp!", childTxn.getEffectiveCommitTimestamp(), Bytes.toLong(CellUtil.cloneValue(commitTs)));
}
 
Example 10
Source File: SynchronousReadResolverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void testResolvingCommittedWorks() throws Exception {
    HRegion region = MockRegionUtils.getMockRegion();
    RegionPartition rp = new RegionPartition(region);

    final TestingTimestampSource commitTsGenerator = new TestingTimestampSource();
    final TxnStore store = new TestingTxnStore(new IncrementingClock(),commitTsGenerator,HExceptionFactory.INSTANCE,Long.MAX_VALUE);
    ReadResolver resolver = SynchronousReadResolver.getResolver(rp,store,new RollForwardStatus(),GreenLight.INSTANCE,false);
    TxnLifecycleManager tc = mock(TxnLifecycleManager.class);
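    // stub commit(): record the commit in the backing store and hand back a fresh commit timestamp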
    doAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocationOnMock) throws Throwable {
            long next = commitTsGenerator.nextTimestamp();
            store.commit((Long) invocationOnMock.getArguments()[0]);
            return next + SIConstants.TRASANCTION_INCREMENT;
        }
    }).when(tc).commit(anyLong());
    Txn committedTxn = new WritableTxn(0x100l, 0x100l, null, Txn.IsolationLevel.SNAPSHOT_ISOLATION, Txn.ROOT_TRANSACTION, tc, false,HExceptionFactory.INSTANCE);
    store.recordNewTransaction(committedTxn);
    committedTxn.commit();

    byte[] rowKey = Encoding.encode("hello");
    Put testPut = new Put(rowKey);
    testPut.addColumn(SIConstants.DEFAULT_FAMILY_BYTES,
            SIConstants.PACKED_COLUMN_BYTES,
            committedTxn.getTxnId(), Encoding.encode("hello2"));

    region.put(testPut);

    Txn readTxn = ReadOnlyTxn.createReadOnlyTransaction(0x300l, Txn.ROOT_TRANSACTION, 0x300l,
            Txn.IsolationLevel.SNAPSHOT_ISOLATION, false, mock(TxnLifecycleManager.class),HExceptionFactory.INSTANCE);
    SimpleTxnFilter filter = new SimpleTxnFilter(null, readTxn,resolver,store);

    Result result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    Cell kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    DataFilter.ReturnCode returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.INCLUDE, returnCode);

    //check to see if the resolver added the proper key value
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size after read resolve!", 2, result.size());
    Cell commitTs = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.COMMIT_TIMESTAMP_COLUMN_BYTES);
    Assert.assertNotNull("No Commit TS column found!", commitTs);
    Assert.assertEquals("Incorrect committed txnId", committedTxn.getTxnId(), commitTs.getTimestamp());
    Assert.assertEquals("Incorrect commit timestamp!", committedTxn.getEffectiveCommitTimestamp(), Bytes.toLong(CellUtil.cloneValue(commitTs)));
}
 
Example 11
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testPreExistingData() throws Exception {
  String tableName = "TestPreExistingData";
  byte[] familyBytes = Bytes.toBytes("f");
  long ttlMillis = TimeUnit.DAYS.toMillis(14);
  HRegion region = createRegion(tableName, familyBytes, ttlMillis);
  try {
    region.initialize();

    // timestamps for pre-existing, non-transactional data
    long now = txVisibilityState.getVisibilityUpperBound() / TxConstants.MAX_TX_PER_MS;
    long older = now - ttlMillis / 2;
    long newer = now - ttlMillis / 3;
    // timestamps for transactional data
    long nowTx = txVisibilityState.getVisibilityUpperBound();
    long olderTx = nowTx - (ttlMillis / 2) * TxConstants.MAX_TX_PER_MS;
    long newerTx = nowTx - (ttlMillis / 3) * TxConstants.MAX_TX_PER_MS;

    Map<byte[], Long> ttls = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ttls.put(familyBytes, ttlMillis);

    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v11")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v12")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v21")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v22")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c1"), olderTx, Bytes.toBytes("v31")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c2"), newerTx, Bytes.toBytes("v32")));

    // Write non-transactional and transactional data
    for (Cell c : cells) {
      region.put(new Put(c.getRow()).add(c.getFamily(), c.getQualifier(), c.getTimestamp(), c.getValue()));
    }

    Scan rawScan = new Scan();
    rawScan.setMaxVersions();

    Transaction dummyTransaction = TxUtils.createDummyTransaction(txVisibilityState);
    Scan txScan = new Scan();
    txScan.setMaxVersions();
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // read all back with raw scanner
    scanAndAssert(region, cells, rawScan);

    // read all back with transaction filter
    scanAndAssert(region, cells, txScan);

    // force a flush to clear the memstore
    region.flushcache();
    scanAndAssert(region, cells, txScan);

    // force a major compaction to remove any expired cells
    region.compactStores(true);
    scanAndAssert(region, cells, txScan);

    // Reduce TTL, this should make cells with timestamps older and olderTx expire
    long newTtl = ttlMillis / 2 - 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // Raw scan should still give all cells
    scanAndAssert(region, cells, rawScan);
    // However, tx scan should not return expired cells
    scanAndAssert(region, select(cells, 1, 3, 5), txScan);

    region.flushcache();
    scanAndAssert(region, cells, rawScan);

    // force a major compaction to remove any expired cells
    region.compactStores(true);
    // This time raw scan too should not return expired cells, as they would be dropped during major compaction
    scanAndAssert(region, select(cells, 1, 3, 5), rawScan);

    // Reduce TTL again to 1 ms, this should expire all cells
    newTtl = 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // force a major compaction to remove expired cells
    region.compactStores(true);
    // This time raw scan should not return any cells, as all cells have expired.
    scanAndAssert(region, Collections.<Cell>emptyList(), rawScan);
  } finally {
    region.close();
  }
}
 
Example 12
Source File: TestWALReplayWithIndexWritesAndCompressedWAL.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test writing edits into an HRegion, closing it, splitting logs, and opening the region
 * again. Verify seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableNameStr), null, null, false);
  final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
  
  //setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getLog().closeAndDelete();
  HLog wal = createWAL(this.conf);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL()).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + "-server-1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  long seqid = region.initialize();
  // HRegionServer usually does this. It knows the largest seqid across all regions.
  wal.setSequenceNumber(seqid);
  
  //make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(new Put[] { p });

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
    Mockito.any(Exception.class));
  region.close(true);
  wal.close();

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf);
  HLog wal2 = createWAL(this.conf);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
 
Example 13
Source File: TestReplicationEndpoint.java    From hbase with Apache License 2.0
@Test
public void testInterClusterReplication() throws Exception {
  final String id = "testInterClusterReplication";

  List<HRegion> regions = UTIL1.getHBaseCluster().getRegions(tableName);
  int totEdits = 0;

  // Make sure edits are spread across regions because we do region based batching
  // before shipping edits.
  for(HRegion region: regions) {
    RegionInfo hri = region.getRegionInfo();
    byte[] row = hri.getStartKey();
    for (int i = 0; i < 100; i++) {
      if (row.length > 0) {
        Put put = new Put(row);
        put.addColumn(famName, row, row);
        region.put(put);
        totEdits++;
      }
    }
  }

  hbaseAdmin.addReplicationPeer(id,
      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2))
          .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()));

  final int numEdits = totEdits;
  Waiter.waitFor(CONF1, 30000, new Waiter.ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits;
    }

    @Override
    public String explainFailure() throws Exception {
      String failure = "Failed to replicate all edits, expected = " + numEdits
          + " replicated = " + InterClusterReplicationEndpointForTest.replicateCount.get();
      return failure;
    }
  });

  hbaseAdmin.removeReplicationPeer(id);
  UTIL1.deleteTableData(tableName);
}
 
Example 14
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testDataJanitorRegionScanner() throws Exception {
  String tableName = "TestRegionScanner";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, TimeUnit.HOURS.toMillis(3));
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    for (int i = 1; i <= 8; i++) {
      for (int k = 1; k <= i; k++) {
        Put p = new Put(Bytes.toBytes(i));
        p.addColumn(familyBytes, columnBytes, V[k], Bytes.toBytes(V[k]));
        region.put(p);
      }
    }

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, the coprocessor should drop all KeyValues with timestamps in the invalid set

    LOG.info("Flushing region " + region.getRegionInfo().getRegionNameAsString());
    FlushResultImpl flushResult = region.flushcache(true, false, new FlushLifeCycleTracker() { });
    Assert.assertTrue("Unexpected flush result: " + flushResult, flushResult.isFlushSucceeded());

    // now a normal scan should only return the valid rows
    // do not use a filter here to test that cleanup works on flush
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);

    // first returned value should be "4" with version "4"
    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 4, new long[]{V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 5, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 6, new long[]{V[6], V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 7, new long[]{V[6], V[4]});

    results.clear();
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 8, new long[] {V[8], V[6], V[4]});
  } finally {
    region.close();
  }
}
 
Example 15
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testPreExistingData() throws Exception {
  String tableName = "TestPreExistingData";
  byte[] familyBytes = Bytes.toBytes("f");
  long ttlMillis = TimeUnit.DAYS.toMillis(14);
  HRegion region = createRegion(tableName, familyBytes, ttlMillis);
  try {
    region.initialize();

    // timestamps for pre-existing, non-transactional data
    long now = txVisibilityState.getVisibilityUpperBound() / TxConstants.MAX_TX_PER_MS;
    long older = now - ttlMillis / 2;
    long newer = now - ttlMillis / 3;
    // timestamps for transactional data
    long nowTx = txVisibilityState.getVisibilityUpperBound();
    long olderTx = nowTx - (ttlMillis / 2) * TxConstants.MAX_TX_PER_MS;
    long newerTx = nowTx - (ttlMillis / 3) * TxConstants.MAX_TX_PER_MS;

    Map<byte[], Long> ttls = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ttls.put(familyBytes, ttlMillis);

    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v11")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v12")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c1"), older, Bytes.toBytes("v21")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c2"), newer, Bytes.toBytes("v22")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c1"), olderTx, Bytes.toBytes("v31")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c2"), newerTx, Bytes.toBytes("v32")));

    // Write non-transactional and transactional data
    for (Cell c : cells) {
      region.put(new Put(c.getRow()).add(c.getFamily(), c.getQualifier(), c.getTimestamp(), c.getValue()));
    }

    Scan rawScan = new Scan();
    rawScan.setMaxVersions();

    Transaction dummyTransaction = TxUtils.createDummyTransaction(txVisibilityState);
    Scan txScan = new Scan();
    txScan.setMaxVersions();
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // read all back with raw scanner
    scanAndAssert(region, cells, rawScan);

    // read all back with transaction filter
    scanAndAssert(region, cells, txScan);

    // force a flush to clear the memstore
    region.flushcache(true, false);
    scanAndAssert(region, cells, txScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    scanAndAssert(region, cells, txScan);

    // Reduce TTL, this should make cells with timestamps older and olderTx expire
    long newTtl = ttlMillis / 2 - 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // Raw scan should still give all cells
    scanAndAssert(region, cells, rawScan);
    // However, tx scan should not return expired cells
    scanAndAssert(region, select(cells, 1, 3, 5), txScan);

    region.flushcache(true, false);
    scanAndAssert(region, cells, rawScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    // This time raw scan too should not return expired cells, as they would be dropped during major compaction
    scanAndAssert(region, select(cells, 1, 3, 5), rawScan);

    // Reduce TTL again to 1 ms, this should expire all cells
    newTtl = 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
                        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false, ScanType.USER_SCAN));

    // force a major compaction to remove expired cells
    region.compact(true);
    // This time raw scan should not return any cells, as all cells have expired.
    scanAndAssert(region, Collections.<Cell>emptyList(), rawScan);
  } finally {
    region.close();
  }
}
 
Example 16
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testDeleteFiltering() throws Exception {
  String tableName = "TestDeleteFiltering";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, 0);
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    byte[] row = Bytes.toBytes(1);
    for (int i = 4; i < V.length; i++) {
      Put p = new Put(row);
      p.add(familyBytes, columnBytes, V[i], Bytes.toBytes(V[i]));
      region.put(p);
    }

    // delete from the third entry back
    // take that cell's timestamp + 1 to simulate a delete in a new tx
    long deleteTs = V[5] + 1;
    Delete d = new Delete(row, deleteTs);
    LOG.info("Issuing delete at timestamp " + deleteTs);
    // row deletes are not yet supported (TransactionAwareHTable normally handles this)
    d.deleteColumns(familyBytes, columnBytes, deleteTs);
    region.delete(d);

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, we should drop the deleted version, but not the others
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache();

    // now a normal scan should return row with versions at: V[8], V[6].
    // V[7] is invalid and V[5] and prior are deleted.
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);
    // should be only one row
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 1,
                          new long[]{V[8], V[6], deleteTs},
                          new byte[][]{Bytes.toBytes(V[8]), Bytes.toBytes(V[6]), new byte[0]});
  } finally {
    region.close();
  }
}
 
Example 17
Source File: TransactionProcessorTest.java    From phoenix-tephra with Apache License 2.0
@Test
public void testDataJanitorRegionScanner() throws Exception {
  String tableName = "TestRegionScanner";
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableName, familyBytes, TimeUnit.HOURS.toMillis(3));
  try {
    region.initialize();
    TransactionStateCache cache = new TransactionStateCacheSupplier(conf).get();
    LOG.info("Coprocessor is using transaction state: " + waitForTransactionState(cache));

    for (int i = 1; i <= 8; i++) {
      for (int k = 1; k <= i; k++) {
        Put p = new Put(Bytes.toBytes(i));
        p.add(familyBytes, columnBytes, V[k], Bytes.toBytes(V[k]));
        region.put(p);
      }
    }

    List<Cell> results = Lists.newArrayList();

    // force a flush to clear the data
    // during flush, the coprocessor should drop all KeyValues with timestamps in the invalid set
    LOG.info("Flushing region " + region.getRegionNameAsString());
    region.flushcache(); // in 0.96, there is no indication of success

    // now a normal scan should only return the valid rows - testing that cleanup works on flush
    Scan scan = new Scan();
    scan.setMaxVersions(10);
    RegionScanner regionScanner = region.getScanner(scan);

    // first returned value should be "4" with version "4"
    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 4, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 5, new long[] {V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 6, new long[] {V[6], V[4]});

    results.clear();
    assertTrue(regionScanner.next(results));
    assertKeyValueMatches(results, 7, new long[] {V[6], V[4]});

    results.clear();
    assertFalse(regionScanner.next(results));
    assertKeyValueMatches(results, 8, new long[] {V[8], V[6], V[4]});
  } finally {
    region.close();
  }
}
 
Example 18
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java    From phoenix with Apache License 2.0
  /**
   * Test writing edits into a region, closing it, splitting logs, and opening the region again.
   * Verify seqids.
   * @throws Exception on failure
   */
  @Test
  public void testReplayEditsWrittenViaHRegion() throws Exception {
    final String tableNameStr = "testReplayEditsWrittenViaHRegion";
    final RegionInfo hri = RegionInfoBuilder.newBuilder(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr)).setSplit(false).build();
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
    deleteDir(basedir);
    final TableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
    
    //setup basic indexing for the table
    // enable indexing to a non-existent index table
    byte[] family = new byte[] { 'a' };
    ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
    builder.addIndexGroup(fam1);
    builder.build(htd);
    WALFactory walFactory = new WALFactory(this.conf, "localhost,1234");

    WAL wal = createWAL(this.conf, walFactory);
    // create the region + its WAL
    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal); // FIXME: Uses private type
    region0.close();
    region0.getWAL().close();

    HRegionServer mockRS = Mockito.mock(HRegionServer.class);
    // mock out some of the internals of the RSS, so we can run CPs
    when(mockRS.getWAL(null)).thenReturn(wal);
    RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
    when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
    ServerName mockServerName = Mockito.mock(ServerName.class);
    when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
    when(mockRS.getServerName()).thenReturn(mockServerName);
    HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
    region.initialize();


    //make an attempted write to the primary that should also be indexed
    byte[] rowkey = Bytes.toBytes("indexed_row_key");
    Put p = new Put(rowkey);
    p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
    region.put(p);

    // we should then see the server go down
    Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
      Mockito.any(Exception.class));

    // then create the index table so we are successful on WAL replay
    TestIndexManagementUtil.createIndexTable(UTIL.getAdmin(), INDEX_TABLE_NAME);

    // run the WAL split and setup the region
    runWALSplit(this.conf, walFactory);
    WAL wal2 = createWAL(this.conf, walFactory);
    HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

    // initialize the region - this should replay the WALEdits from the WAL
    region1.initialize();
    org.apache.hadoop.hbase.client.Connection hbaseConn =
            ConnectionFactory.createConnection(UTIL.getConfiguration());

    // now check to ensure that we wrote to the index table
    Table index = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(INDEX_TABLE_NAME));
    int indexSize = getKeyValueCount(index);
    assertEquals("Index wasn't propertly updated from WAL replay!", 1, indexSize);
    Get g = new Get(rowkey);
    final Result result = region1.get(g);
    assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

    // cleanup the index table
    Admin admin = UTIL.getAdmin();
    admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
    admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
    admin.close();
  }
 