Java Code Examples for org.apache.hadoop.hbase.client.Result#getColumnLatestCell()

The following examples show how to use org.apache.hadoop.hbase.client.Result#getColumnLatestCell(). Each example is drawn from an open source project; the source file, originating project, and license are noted above it.
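
Before diving in, the basic pattern is worth spelling out: Result#getColumnLatestCell(family, qualifier) returns the newest Cell stored for that column, or null when the row has no cell for it, so callers typically null-check before reading the value. The sketch below shows this pattern end to end; the table name ("my_table"), family ("cf"), qualifier ("q"), and row key are hypothetical, so treat it as an illustration rather than a drop-in snippet.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetColumnLatestCellSketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection();
             // Hypothetical table and column names, for illustration only.
             Table table = connection.getTable(TableName.valueOf("my_table"))) {
            Result result = table.get(new Get(Bytes.toBytes("row-1")));
            Cell cell = result.getColumnLatestCell(Bytes.toBytes("cf"), Bytes.toBytes("q"));
            if (cell == null) {
                // The row has no cell for this family/qualifier.
                System.out.println("column not present");
            } else {
                // cloneValue copies the value bytes out of the cell's backing array.
                System.out.println(Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
}
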
Example 1
Source File: QuotaTableUtil.java    From hbase with Apache License 2.0
/**
 * Extracts the {@link SpaceViolationPolicy} and {@link TableName} from the provided
 * {@link Result} and adds them to the given {@link Map}. If the result does not contain
 * the expected information or the serialized policy in the value is invalid, this method
 * will throw an {@link IllegalArgumentException}.
 *
 * @param result A row from the quota table.
 * @param snapshots A map of snapshots to add the result of this method into.
 */
public static void extractQuotaSnapshot(
    Result result, Map<TableName,SpaceQuotaSnapshot> snapshots) {
  byte[] row = Objects.requireNonNull(result).getRow();
  if (row == null || row.length == 0) {
    throw new IllegalArgumentException("Provided result had a null row");
  }
  final TableName targetTableName = getTableFromRowKey(row);
  Cell c = result.getColumnLatestCell(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY);
  if (c == null) {
    throw new IllegalArgumentException("Result did not contain the expected column "
        + QUOTA_POLICY_COLUMN + ", " + result.toString());
  }
  ByteString buffer = UnsafeByteOperations.unsafeWrap(
      c.getValueArray(), c.getValueOffset(), c.getValueLength());
  try {
    QuotaProtos.SpaceQuotaSnapshot snapshot = QuotaProtos.SpaceQuotaSnapshot.parseFrom(buffer);
    snapshots.put(targetTableName, SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot));
  } catch (InvalidProtocolBufferException e) {
    throw new IllegalArgumentException(
        "Result did not contain a valid SpaceQuota protocol buffer message", e);
  }
}
 
Example 2
Source File: IndexMetadataModel.java    From hgraphdb with Apache License 2.0
public IndexMetadata deserialize(Result result) {
    byte[] bytes = result.getRow();
    PositionedByteRange buffer = new SimplePositionedByteRange(bytes);
    String label = OrderedBytes.decodeString(buffer);
    String propertyKey = OrderedBytes.decodeString(buffer);
    ElementType type = OrderedBytes.decodeInt8(buffer) == 1 ? ElementType.VERTEX : ElementType.EDGE;
    Cell uniqueCell = result.getColumnLatestCell(Constants.DEFAULT_FAMILY_BYTES, Constants.UNIQUE_BYTES);
    boolean isUnique = ValueUtils.deserialize(CellUtil.cloneValue(uniqueCell));
    Cell stateCell = result.getColumnLatestCell(Constants.DEFAULT_FAMILY_BYTES, Constants.INDEX_STATE_BYTES);
    State state = State.valueOf(ValueUtils.deserialize(CellUtil.cloneValue(stateCell)));
    Cell createdAtCell = result.getColumnLatestCell(Constants.DEFAULT_FAMILY_BYTES, Constants.CREATED_AT_BYTES);
    Long createdAt = ValueUtils.deserialize(CellUtil.cloneValue(createdAtCell));
    Cell updatedAtCell = result.getColumnLatestCell(Constants.DEFAULT_FAMILY_BYTES, Constants.UPDATED_AT_BYTES);
    Long updatedAt = ValueUtils.deserialize(CellUtil.cloneValue(updatedAtCell));
    return new IndexMetadata(type, label, propertyKey, isUnique, state, createdAt, updatedAt);
}
 
Example 3
Source File: DataJanitorState.java    From phoenix-tephra with Apache License 2.0
/**
 * Gets a list of {@link RegionPruneInfo} for given regions. Returns all regions if the given regions set is null.
 *
 * @param regions a set of regions
 * @return list of {@link RegionPruneInfo}s.
 * @throws IOException when not able to read the data from HBase
 */
public List<RegionPruneInfo> getPruneInfoForRegions(@Nullable SortedSet<byte[]> regions) throws IOException {
  List<RegionPruneInfo> regionPruneInfos = new ArrayList<>();
  try (Table stateTable = stateTableSupplier.get()) {
    byte[] startRow = makeRegionKey(EMPTY_BYTE_ARRAY);
    Scan scan = new Scan(startRow, REGION_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] region = getRegionFromKey(next.getRow());
        if (regions == null || regions.contains(region)) {
          Cell cell = next.getColumnLatestCell(FAMILY, PRUNE_UPPER_BOUND_COL);
          if (cell != null) {
            byte[] pruneUpperBoundBytes = CellUtil.cloneValue(cell);
            long timestamp = cell.getTimestamp();
            regionPruneInfos.add(new RegionPruneInfo(region, Bytes.toStringBinary(region),
                                                     Bytes.toLong(pruneUpperBoundBytes), timestamp));
          }
        }
      }
    }
  }
  return Collections.unmodifiableList(regionPruneInfos);
}
 
Example 4
Source File: TestMobStoreScanner.java    From hbase with Apache License 2.0
@Test
public void testReadFromCorruptMobFilesWithReadEmptyValueOnMobCellMiss() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  setUp(0, tableName);
  createRecordAndCorruptMobFile(tableName, row1, family, qf1, Bytes.toBytes("value1"));
  Get get = new Get(row1);
  get.setAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS, Bytes.toBytes(true));
  Result result = table.get(get);
  Cell cell = result.getColumnLatestCell(family, qf1);
  Assert.assertEquals(0, cell.getValueLength());
}
 
Example 5
Source File: AgentLifeCycleMapper.java    From pinpoint with Apache License 2.0
@Override
public AgentLifeCycleBo mapRow(Result result, int rowNum) throws Exception {
    if (result.isEmpty()) {
        return null;
    }

    Cell valueCell = result.getColumnLatestCell(AGENT_LIFECYCLE_STATUS.getName(), HbaseColumnFamily.AGENT_LIFECYCLE_STATUS.QUALIFIER_STATES);
    
    return createAgentLifeCycleBo(valueCell);
}
 
Example 6
Source File: AppSummaryService.java    From hraven with Apache License 2.0
private boolean updateCost(AppAggregationKey appAggKey, Table aggTable,
    JobDetails jobDetails) throws IOException {
  byte[] rowKey = aggConv.toBytes(appAggKey);

  Get g = new Get(rowKey);
  g.addColumn(AggregationConstants.INFO_FAM_BYTES,
      AggregationConstants.JOBCOST_BYTES);
  Result r = aggTable.get(g);
  double existingCost = 0.0;
  byte[] existingCostBytes = null;
  Cell columnLatest =
      r.getColumnLatestCell(AggregationConstants.INFO_FAM_BYTES,
          AggregationConstants.JOBCOST_BYTES);

  if (columnLatest != null) {
    existingCost = Bytes.toDouble(CellUtil.cloneValue(columnLatest));
    existingCostBytes = Bytes.toBytes(existingCost);
  }

  double newCost = existingCost + jobDetails.getCost();
  if (LOG.isTraceEnabled()) {
    LOG.trace(" total app aggregated cost  " + newCost);
  }
  // now insert cost
  return executeCheckAndPut(aggTable, rowKey, existingCostBytes,
      Bytes.toBytes(newCost), AggregationConstants.INFO_FAM_BYTES,
      AggregationConstants.JOBCOST_BYTES);

}
 
Example 7
Source File: LabelConnectionModel.java    From hgraphdb with Apache License 2.0
public LabelConnection deserialize(Result result) {
    byte[] bytes = result.getRow();
    PositionedByteRange buffer = new SimplePositionedByteRange(bytes);
    String outVertexLabel = OrderedBytes.decodeString(buffer);
    String edgeLabel = OrderedBytes.decodeString(buffer);
    String inVertexLabel = OrderedBytes.decodeString(buffer);
    Cell createdAtCell = result.getColumnLatestCell(Constants.DEFAULT_FAMILY_BYTES, Constants.CREATED_AT_BYTES);
    Long createdAt = ValueUtils.deserialize(CellUtil.cloneValue(createdAtCell));
    return new LabelConnection(outVertexLabel, edgeLabel, inVertexLabel, createdAt);
}
 
Example 8
Source File: Mutators.java    From hgraphdb with Apache License 2.0
public static long increment(Table table, Mutator writer, String key) {
    List<Mutation> batch = new ArrayList<>();
    writer.constructMutations().forEachRemaining(batch::add);
    Object[] results = write(table, batch);
    // Increment result is the first
    Result result = (Result) results[0];
    Cell cell = result.getColumnLatestCell(Constants.DEFAULT_FAMILY_BYTES, Bytes.toBytes(key));
    return Bytes.toLong(CellUtil.cloneValue(cell));
}
 
Example 9
Source File: GridTableHBaseBenchmark.java    From Kylin with Apache License 2.0
private void consume(Result r, int nBytesToConsume) {
    Cell cell = r.getColumnLatestCell(CF, QN);
    byte mix = 0;
    byte[] valueArray = cell.getValueArray();
    int n = Math.min(nBytesToConsume, cell.getValueLength());
    for (int i = 0; i < n; i++) {
        mix ^= valueArray[i];
        bytesRead++;
    }
    discard(mix);
    rowsRead++;
}
 
Example 10
Source File: AppSummaryService.java    From hraven with Apache License 2.0
/**
 * updates the queue list for this app aggregation
 * @throws IOException
 */
boolean updateQueue(AppAggregationKey appAggKey, Table aggTable,
    JobDetails jobDetails) throws IOException {
  byte[] rowKey = aggConv.toBytes(appAggKey);

  Get g = new Get(rowKey);
  g.addColumn(AggregationConstants.INFO_FAM_BYTES,
      AggregationConstants.HRAVEN_QUEUE_BYTES);
  Result r = aggTable.get(g);

  Cell existingQueuesCell =
      r.getColumnLatestCell(AggregationConstants.INFO_FAM_BYTES,
          AggregationConstants.HRAVEN_QUEUE_BYTES);
  String existingQueues = null;
  byte[] existingQueuesBytes = null;
  if (existingQueuesCell != null) {
    existingQueues = Bytes.toString(CellUtil.cloneValue(existingQueuesCell));
    existingQueuesBytes = Bytes.toBytes(existingQueues);
  }

  // get details for the queue list to be inserted
  String insertQueues = createQueueListValue(jobDetails, existingQueues);

  // if existing and to be inserted queue lists are different, then
  // execute check and put
  if (insertQueues.equalsIgnoreCase(existingQueues)) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Queue already present in aggregation for this app "
          + existingQueues + " " + insertQueues);
    }
    return true;
  } else {
    return executeCheckAndPut(aggTable, rowKey, existingQueuesBytes,
        Bytes.toBytes(insertQueues), AggregationConstants.INFO_FAM_BYTES,
        AggregationConstants.HRAVEN_QUEUE_BYTES);
  }
}
 
Example 11
Source File: GridTableHBaseBenchmark.java    From kylin-on-parquet-v2 with Apache License 2.0
private void consume(Result r, int nBytesToConsume) {
    Cell cell = r.getColumnLatestCell(CF, QN);
    byte mix = 0;
    byte[] valueArray = cell.getValueArray();
    int n = Math.min(nBytesToConsume, cell.getValueLength());
    for (int i = 0; i < n; i++) {
        mix ^= valueArray[i];
        bytesRead++;
    }
    discard(mix);
    rowsRead++;
}
 
Example 12
Source File: TestMetaTableAccessor.java    From hbase with Apache License 2.0
@Test
public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException {
  long regionId = System.currentTimeMillis();
  RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
    .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
    .setRegionId(regionId).setReplicaId(0).build();

  Table meta = MetaTableAccessor.getMetaHTable(connection);
  try {
    List<RegionInfo> regionInfos = Lists.newArrayList(primary);
    MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);
    MetaTableAccessor.removeRegionReplicasFromMeta(Sets.newHashSet(primary.getRegionName()), 1, 2,
      connection);
    Get get = new Get(primary.getRegionName());
    Result result = meta.get(get);
    for (int replicaId = 0; replicaId < 3; replicaId++) {
      Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        CatalogFamilyFormat.getServerColumn(replicaId));
      Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        CatalogFamilyFormat.getStartCodeColumn(replicaId));
      Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        CatalogFamilyFormat.getRegionStateColumn(replicaId));
      Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
        CatalogFamilyFormat.getServerNameColumn(replicaId));
      if (replicaId == 0) {
        assertNotNull(stateCell);
      } else {
        assertNull(serverCell);
        assertNull(startCodeCell);
        assertNull(stateCell);
        assertNull(snCell);
      }
    }
  } finally {
    meta.close();
  }
}
 
Example 13
Source File: CatalogFamilyFormat.java    From hbase with Apache License 2.0
/**
 * Decode table state from META Result. Should contain cell from HConstants.TABLE_FAMILY
 * @return null if not found
 */
@Nullable
public static TableState getTableState(Result r) throws IOException {
  Cell cell = r.getColumnLatestCell(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER);
  if (cell == null) {
    return null;
  }
  try {
    return TableState.parseFrom(TableName.valueOf(r.getRow()),
      Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(),
        cell.getValueOffset() + cell.getValueLength()));
  } catch (DeserializationException e) {
    throw new IOException(e);
  }
}
 
Example 14
Source File: TestMetaTableAccessor.java    From hbase with Apache License 2.0
public static void assertEmptyMetaLocation(Table meta, byte[] row, int replicaId)
  throws IOException {
  Get get = new Get(row);
  Result result = meta.get(get);
  Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    CatalogFamilyFormat.getServerColumn(replicaId));
  Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
    CatalogFamilyFormat.getStartCodeColumn(replicaId));
  assertNotNull(serverCell);
  assertNotNull(startCodeCell);
  assertEquals(0, serverCell.getValueLength());
  assertEquals(0, startCodeCell.getValueLength());
}
 
Example 15
Source File: HBCKMetaTableAccessor.java    From hbase-operator-tools with Apache License 2.0
/**
 * The latest seqnum that the server writing to meta observed when opening the region.
 * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written.
 * (Copied from MetaTableAccessor)
 * @param r Result to pull the seqNum from
 * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
 */
private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
  Cell cell = r.getColumnLatestCell(CATALOG_FAMILY, getSeqNumColumn(replicaId));
  if (cell == null || cell.getValueLength() == 0) {
    return HConstants.NO_SEQNUM;
  }
  return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
}
 
Example 16
Source File: HBCKMetaTableAccessor.java    From hbase-operator-tools with Apache License 2.0
/**
 * Returns the RegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
 * <code>qualifier</code> of the catalog table result.
 * (Copied from MetaTableAccessor)
 * @param r a Result object from the catalog table scan
 * @param qualifier Column family qualifier
 * @return A RegionInfo instance or null.
 */
public static RegionInfo getRegionInfo(final Result r, byte [] qualifier) {
  Cell cell = r.getColumnLatestCell(CATALOG_FAMILY, qualifier);
  if (cell == null) {
    return null;
  }
  return RegionInfo.parseFromOrNull(cell.getValueArray(),
    cell.getValueOffset(), cell.getValueLength());
}
 
Example 17
Source File: HBaseResolver.java    From pxf with Apache License 2.0
/**
 * Returns the value of a column from a Result object.
 *
 * @param result HBase table row
 * @param column HBase column to be retrieved
 * @return HBase column value
 */
byte[] getColumnValue(Result result, HBaseColumnDescriptor column) {
    // if column does not contain a value, return null
    if (!result.containsColumn(column.columnFamilyBytes(),
            column.qualifierBytes())) {
        return null;
    }

    // else, get the latest version of the requested column
    Cell cell = result.getColumnLatestCell(column.columnFamilyBytes(), column.qualifierBytes());
    return CellUtil.cloneValue(cell);
}
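
Example 17 guards the lookup with containsColumn() before fetching the cell. Because getColumnLatestCell() itself returns null when the column is absent, a hypothetical equivalent can get by with a single lookup and a null check, the pattern several of the other examples above rely on:

// Hypothetical variant of getColumnValue above: one lookup plus a null check.
byte[] getColumnValue(Result result, byte[] family, byte[] qualifier) {
    Cell cell = result.getColumnLatestCell(family, qualifier);
    return cell == null ? null : CellUtil.cloneValue(cell);
}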
 
Example 18
Source File: SynchronousReadResolverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void testResolvingCommittedWorks() throws Exception {
    HRegion region = MockRegionUtils.getMockRegion();
    RegionPartition rp = new RegionPartition(region);

    final TestingTimestampSource commitTsGenerator = new TestingTimestampSource();
    final TxnStore store = new TestingTxnStore(new IncrementingClock(),commitTsGenerator,HExceptionFactory.INSTANCE,Long.MAX_VALUE);
    ReadResolver resolver = SynchronousReadResolver.getResolver(rp,store,new RollForwardStatus(),GreenLight.INSTANCE,false);
    TxnLifecycleManager tc = mock(TxnLifecycleManager.class);
    doAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocationOnMock) throws Throwable {
            long next = commitTsGenerator.nextTimestamp();
            store.commit((Long) invocationOnMock.getArguments()[0]);
            return next + SIConstants.TRASANCTION_INCREMENT;
        }
    }).when(tc).commit(anyLong());
    Txn committedTxn = new WritableTxn(0x100l, 0x100l, null, Txn.IsolationLevel.SNAPSHOT_ISOLATION, Txn.ROOT_TRANSACTION, tc, false,HExceptionFactory.INSTANCE);
    store.recordNewTransaction(committedTxn);
    committedTxn.commit();

    byte[] rowKey = Encoding.encode("hello");
    Put testPut = new Put(rowKey);
    testPut.addColumn(SIConstants.DEFAULT_FAMILY_BYTES,
            SIConstants.PACKED_COLUMN_BYTES,
            committedTxn.getTxnId(), Encoding.encode("hello2"));

    region.put(testPut);

    Txn readTxn = ReadOnlyTxn.createReadOnlyTransaction(0x300l, Txn.ROOT_TRANSACTION, 0x300l,
            Txn.IsolationLevel.SNAPSHOT_ISOLATION, false, mock(TxnLifecycleManager.class),HExceptionFactory.INSTANCE);
    SimpleTxnFilter filter = new SimpleTxnFilter(null, readTxn,resolver,store);

    Result result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    Cell kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    DataFilter.ReturnCode returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.INCLUDE, returnCode);

    //check to see if the resolver added the proper key value
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size after read resolve!", 2, result.size());
    Cell commitTs = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.COMMIT_TIMESTAMP_COLUMN_BYTES);
    Assert.assertNotNull("No Commit TS column found!", commitTs);
    Assert.assertEquals("Incorrect committed txnId", committedTxn.getTxnId(), commitTs.getTimestamp());
    Assert.assertEquals("Incorrect commit timestamp!", committedTxn.getEffectiveCommitTimestamp(), Bytes.toLong(CellUtil.cloneValue(commitTs)));
}
 
Example 19
Source File: SynchronousReadResolverTest.java    From spliceengine with GNU Affero General Public License v3.0
@Test
public void testResolvingCommittedDoesNotHappenUntilParentCommits() throws Exception {
    HRegion region = MockRegionUtils.getMockRegion();
    RegionPartition rp = new RegionPartition(region);

    TestingTimestampSource timestampSource = new TestingTimestampSource();
    TxnStore store = new TestingTxnStore(new IncrementingClock(),timestampSource,HExceptionFactory.INSTANCE,Long.MAX_VALUE);
    ReadResolver resolver = SynchronousReadResolver.getResolver(rp, store, new RollForwardStatus(), GreenLight.INSTANCE, false);

    ClientTxnLifecycleManager tc = new ClientTxnLifecycleManager(timestampSource,HExceptionFactory.INSTANCE);
    tc.setTxnStore(store);
    tc.setKeepAliveScheduler(new ManualKeepAliveScheduler(store));
    Txn parentTxn = tc.beginTransaction(Bytes.toBytes("1184"));

    Txn childTxn = tc.beginChildTransaction(parentTxn, Txn.IsolationLevel.SNAPSHOT_ISOLATION, false, Bytes.toBytes("1184"));

    byte[] rowKey = Encoding.encode("hello");
    Put testPut = new Put(rowKey);
    testPut.addColumn(SIConstants.DEFAULT_FAMILY_BYTES,
            SIConstants.PACKED_COLUMN_BYTES,
            childTxn.getTxnId(), Encoding.encode("hello2"));

    region.put(testPut);

    childTxn.commit();

    Txn readTxn = tc.beginTransaction(); //a read-only transaction with SI semantics
    SimpleTxnFilter filter = new SimpleTxnFilter(null, readTxn,resolver,store);

    Result result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    Cell kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    DataFilter.ReturnCode returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.SKIP, returnCode);

    //make sure the resolver has not added anything
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size after read resolve!", 1, result.size());

    //commit the parent and see if resolution works then
    parentTxn.commit();

    //now re-read the data and make sure that it resolves
    filter.nextRow();
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 1, result.size());
    kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);

    returnCode = filter.filterCell(new HCell(kv));
    Assert.assertEquals("Incorrect return code!", DataFilter.ReturnCode.SKIP, returnCode);

    //make sure that the read-resolver worked
    result = region.get(new Get(rowKey));
    Assert.assertEquals("Incorrect result size", 2, result.size());
    kv = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.PACKED_COLUMN_BYTES);
    Assert.assertNotNull("No data column found!", kv);
    Cell commitTs = result.getColumnLatestCell(SIConstants.DEFAULT_FAMILY_BYTES, SIConstants.COMMIT_TIMESTAMP_COLUMN_BYTES);
    Assert.assertNotNull("No Commit TS column found!", commitTs);
    Assert.assertEquals("Incorrect committed txnId", childTxn.getTxnId(), commitTs.getTimestamp());
    Assert.assertEquals("Incorrect commit timestamp!", childTxn.getEffectiveCommitTimestamp(), Bytes.toLong(CellUtil.cloneValue(commitTs)));
}
 
Example 20
Source File: TestMetaTableAccessor.java    From hbase with Apache License 2.0
@Test
public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException {
  long regionId = System.currentTimeMillis();

  RegionInfo regionInfoA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
    .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(new byte[] { 'a' }).setSplit(false)
    .setRegionId(regionId).setReplicaId(0).build();

  RegionInfo regionInfoB = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
    .setStartKey(new byte[] { 'a' }).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
    .setRegionId(regionId).setReplicaId(0).build();
  RegionInfo mergedRegionInfo =
    RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
      .setRegionId(regionId).setReplicaId(0).build();

  ServerName sn = ServerName.valueOf("bar", 0, 0);
  try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
    List<RegionInfo> regionInfos = Lists.newArrayList(regionInfoA, regionInfoB);
    MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);

    // write the serverName column with a big current time, but set the masters time as even
    // bigger. When region merge deletes the rows for regionA and regionB, the serverName columns
    // should not be seen by the following get
    long serverNameTime = EnvironmentEdgeManager.currentTime() + 100000000;
    long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;

    // write the serverName columns
    MetaTableAccessor.updateRegionLocation(connection, regionInfoA, sn, 1, serverNameTime);

    // assert that we have the serverName column with expected ts
    Get get = new Get(mergedRegionInfo.getRegionName());
    Result result = meta.get(get);
    Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      CatalogFamilyFormat.getServerColumn(0));
    assertNotNull(serverCell);
    assertEquals(serverNameTime, serverCell.getTimestamp());

    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    edge.setValue(masterSystemTime);
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      // now merge the regions, effectively deleting the rows for region a and b.
      MetaTableAccessor.mergeRegions(connection, mergedRegionInfo,
        getMapOfRegionsToSeqNum(regionInfoA, regionInfoB), sn, 1);
    } finally {
      EnvironmentEdgeManager.reset();
    }

    result = meta.get(get);
    serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      CatalogFamilyFormat.getServerColumn(0));
    Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      CatalogFamilyFormat.getStartCodeColumn(0));
    Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
      CatalogFamilyFormat.getSeqNumColumn(0));
    assertNull(serverCell);
    assertNull(startCodeCell);
    assertNull(seqNumCell);
  }
}