Java Code Examples for org.apache.hadoop.hbase.KeyValue#getTimestamp()
The following examples show how to use org.apache.hadoop.hbase.KeyValue#getTimestamp(). They are extracted from open source projects.
Each example notes the original project, source file, and license above the code.
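Before the project-specific examples, here is a minimal, self-contained sketch of the method's basic contract: getTimestamp() returns the cell's timestamp, and a KeyValue built without an explicit timestamp carries HConstants.LATEST_TIMESTAMP until it is resolved later (for instance by the region server, or by code such as updateLatestStamp in Example 2). This sketch is illustrative only and is not taken from any of the projects below; the class name and the row/column literals are made up, and it assumes an HBase client library on the classpath.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class GetTimestampSketch {
  public static void main(String[] args) {
    // A KeyValue constructed with an explicit timestamp reports that value back.
    KeyValue explicit = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1234567890L, Bytes.toBytes("v"));
    System.out.println(explicit.getTimestamp()); // 1234567890

    // A KeyValue constructed without a timestamp carries LATEST_TIMESTAMP until
    // something later fills in a real time.
    KeyValue latest = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    System.out.println(latest.getTimestamp() == HConstants.LATEST_TIMESTAMP); // true
  }
}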
Example 1
Source Project: phoenix | File: ApplyAndFilterDeletesFilter.java | License: BSD 3-Clause "New" or "Revised" License

/**
 * @param next
 * @return
 */
public boolean matchesPoint(KeyValue next) {
  // point deletes only apply to the exact KV that they reference, so we only need to ensure
  // that the timestamp matches exactly. Because we sort by timestamp first, either the next
  // keyvalue has the exact timestamp or is an older (smaller) timestamp, and we can allow that
  // one.
  if (pointDelete != null && pointDelete.matchingFamily(next)
      && pointDelete.matchingQualifier(next)) {
    if (pointDelete.getTimestamp() == next.getTimestamp()) {
      return true;
    }
    // clear the point delete since the TS must not be matching
    coveringDelete.pointDelete = null;
  }
  return false;
}
Example 2
Source Project: phoenix | File: CoveredColumnsIndexBuilder.java | License: Apache License 2.0

/**
 * Batch all the {@link KeyValue}s in a collection of kvs by timestamp. Updates any
 * {@link KeyValue} with a timestamp == {@link HConstants#LATEST_TIMESTAMP} to the timestamp at
 * the time the method is called.
 * @param kvs {@link KeyValue}s to break into batches
 * @param batches to update with the given kvs
 */
protected void createTimestampBatchesFromKeyValues(Collection<KeyValue> kvs,
    Map<Long, Batch> batches) {
  long now = EnvironmentEdgeManager.currentTimeMillis();
  byte[] nowBytes = Bytes.toBytes(now);

  // batch kvs by timestamp
  for (KeyValue kv : kvs) {
    long ts = kv.getTimestamp();
    // override the timestamp to the current time, so the index and primary tables match
    // all the keys with LATEST_TIMESTAMP will then be put into the same batch
    if (kv.updateLatestStamp(nowBytes)) {
      ts = now;
    }
    Batch batch = batches.get(ts);
    if (batch == null) {
      batch = new Batch(ts);
      batches.put(ts, batch);
    }
    batch.add(kv);
  }
}
Example 3
Source Project: phoenix | File: ApplyAndFilterDeletesFilter.java | License: Apache License 2.0

/**
 * Check to see if we should skip this {@link KeyValue} based on the family.
 * <p>
 * Internally, also resets the currently tracked "Delete Family" marker we are tracking if the
 * keyvalue is into another family (since CFs sort lexicographically, we can discard the current
 * marker since it must not be applicable to any more kvs in a linear scan).
 * @param next
 * @return <tt>true</tt> if this {@link KeyValue} matches a delete.
 */
public boolean matchesFamily(KeyValue next) {
  if (deleteFamily == null) {
    return false;
  }
  if (CellUtil.matchingFamily(deleteFamily, next)) {
    // falls within the timestamp range
    if (deleteFamily.getTimestamp() >= next.getTimestamp()) {
      return true;
    }
  } else {
    // only can reset the delete family because we are on to another family
    deleteFamily = null;
  }
  return false;
}
Example 4
Source Project: phoenix | File: Sequence.java | License: BSD 3-Clause "New" or "Revised" License

public long dropSequence(Result result) throws SQLException {
  KeyValue statusKV = result.raw()[0];
  long timestamp = statusKV.getTimestamp();
  int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getBuffer(),
      statusKV.getValueOffset(), null);
  SQLExceptionCode code = statusCode == 0 ? null : SQLExceptionCode.fromErrorCode(statusCode);
  if (code == null) {
    // Insert delete marker so that point-in-time sequences work
    insertSequenceValue(new SequenceValue(timestamp, true));
    return timestamp;
  }
  // TODO: We could have the server return the timestamps of the
  // delete markers and we could insert them here, but this seems
  // like overkill.
  // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) {
  // }
  throw new SQLExceptionInfo.Builder(code)
      .setSchemaName(key.getSchemaName())
      .setTableName(key.getSequenceName())
      .build().buildException();
}
Example 5
Source Project: phoenix | File: SchemaUtil.java | License: BSD 3-Clause "New" or "Revised" License

private static KeyValue upgradeTo3(KeyValue keyValue) {
  byte[] buf = keyValue.getBuffer();
  int newLength = keyValue.getRowLength() + 1;
  byte[] newKey = new byte[newLength];
  newKey[0] = QueryConstants.SEPARATOR_BYTE;
  System.arraycopy(buf, keyValue.getRowOffset(), newKey, 1, keyValue.getRowLength());
  byte[] valueBuf = updateValueIfNecessary(keyValue);
  int valueOffset = keyValue.getValueOffset();
  int valueLength = keyValue.getValueLength();
  if (valueBuf != buf) {
    valueOffset = 0;
    valueLength = valueBuf.length;
  }
  return new KeyValue(newKey, 0, newLength,
      buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
      buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
      keyValue.getTimestamp(), Type.codeToType(keyValue.getType()),
      valueBuf, valueOffset, valueLength);
}
Example 6
Source Project: phoenix | File: Sequence.java | License: Apache License 2.0

public SequenceValue(Result r, ValueOp op) {
  KeyValue currentValueKV = getCurrentValueKV(r);
  KeyValue incrementByKV = getIncrementByKV(r);
  KeyValue cacheSizeKV = getCacheSizeKV(r);
  KeyValue minValueKV = getMinValueKV(r);
  KeyValue maxValueKV = getMaxValueKV(r);
  KeyValue cycleKV = getCycleKV(r);
  this.timestamp = currentValueKV.getTimestamp();
  this.nextValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(),
      currentValueKV.getValueOffset(), SortOrder.getDefault());
  this.incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(),
      incrementByKV.getValueOffset(), SortOrder.getDefault());
  this.cacheSize = PLong.INSTANCE.getCodec().decodeLong(cacheSizeKV.getValueArray(),
      cacheSizeKV.getValueOffset(), SortOrder.getDefault());
  this.minValue = PLong.INSTANCE.getCodec().decodeLong(minValueKV.getValueArray(),
      minValueKV.getValueOffset(), SortOrder.getDefault());
  this.maxValue = PLong.INSTANCE.getCodec().decodeLong(maxValueKV.getValueArray(),
      maxValueKV.getValueOffset(), SortOrder.getDefault());
  this.cycle = (Boolean) PBoolean.INSTANCE.toObject(cycleKV.getValueArray(),
      cycleKV.getValueOffset(), cycleKV.getValueLength());
  this.limitReached = false;
  currentValue = nextValue;
  if (op != ValueOp.VALIDATE_SEQUENCE) {
    currentValue -= incrementBy * cacheSize;
  }
}
Example 7
Source Project: phoenix | File: ApplyAndFilterDeletesFilter.java | License: Apache License 2.0

/**
 * @param next
 * @return
 */
public boolean matchesPoint(KeyValue next) {
  // point deletes only apply to the exact KV that they reference, so we only need to ensure
  // that the timestamp matches exactly. Because we sort by timestamp first, either the next
  // keyvalue has the exact timestamp or is an older (smaller) timestamp, and we can allow that
  // one.
  if (pointDelete != null && CellUtil.matchingFamily(pointDelete, next)
      && CellUtil.matchingQualifier(pointDelete, next)) {
    if (pointDelete.getTimestamp() == next.getTimestamp()) {
      return true;
    }
    // clear the point delete since the TS must not be matching
    pointDelete = null;
  }
  return false;
}
Example 8
Source Project: Halyard | File: HBaseSailHashConflictTest.java | License: Apache License 2.0

@BeforeClass
public static void setup() throws Exception {
  try (HTable table = HalyardTableUtils.getTable(HBaseServerTestInstance.getInstanceConfig(),
      "testConflictingHash", true, 0)) {
    long timestamp = System.currentTimeMillis();
    KeyValue triple[] = HalyardTableUtils.toKeyValues(SUBJ, PRED, OBJ, null, false, timestamp);
    KeyValue conflicts[][] = new KeyValue[][] {
      HalyardTableUtils.toKeyValues(SUBJ, PRED, CONF, null, false, timestamp),
      HalyardTableUtils.toKeyValues(SUBJ, CONF, OBJ, null, false, timestamp),
      HalyardTableUtils.toKeyValues(SUBJ, CONF, CONF, null, false, timestamp),
      HalyardTableUtils.toKeyValues(CONF, PRED, OBJ, null, false, timestamp),
      HalyardTableUtils.toKeyValues(CONF, PRED, CONF, null, false, timestamp),
      HalyardTableUtils.toKeyValues(CONF, CONF, OBJ, null, false, timestamp),
      HalyardTableUtils.toKeyValues(CONF, CONF, CONF, null, false, timestamp),
    };
    for (int i = 0; i < triple.length; i++) {
      KeyValue kv = triple[i];
      table.put(new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
          kv.getTimestamp()).add(kv));
      for (int j = 0; j < conflicts.length; j++) {
        KeyValue xkv = new KeyValue(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
            kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(),
            conflicts[j][i].getQualifierArray(), conflicts[j][i].getQualifierOffset(),
            conflicts[j][i].getQualifierLength(), kv.getTimestamp(), KeyValue.Type.Put,
            conflicts[j][i].getValueArray(), conflicts[j][i].getValueOffset(),
            conflicts[j][i].getValueLength());
        table.put(new Put(xkv.getRowArray(), xkv.getRowOffset(), xkv.getRowLength(),
            xkv.getTimestamp()).add(xkv));
      }
    }
    table.flushCommits();
  }
  sail = new HBaseSail(HBaseServerTestInstance.getInstanceConfig(), "testConflictingHash",
      false, 0, true, 0, null, null);
  sail.initialize();
}
Example 9
Source Project: phoenix | File: UpgradeUtil.java | License: Apache License 2.0

@SuppressWarnings("deprecation")
private static KeyValue addSaltByte(KeyValue keyValue, int nSaltBuckets) {
  byte[] buf = keyValue.getBuffer();
  int length = keyValue.getRowLength();
  int offset = keyValue.getRowOffset();
  boolean isViewSeq = length > SEQ_PREFIX_BYTES.length
      && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset,
          SEQ_PREFIX_BYTES.length) == 0;
  if (!isViewSeq && nSaltBuckets == 0) {
    return null;
  }
  byte[] newBuf;
  if (isViewSeq) {
    // We messed up the name for the sequences for view indexes so we'll take this opportunity to fix it
    if (buf[length - 1] == 0) { // Global indexes on views have trailing null byte
      length--;
    }
    byte[][] rowKeyMetaData = new byte[3][];
    SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData);
    byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] unprefixedSchemaName = new byte[schemaName.length
        - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length];
    System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length,
        unprefixedSchemaName, 0, unprefixedSchemaName.length);
    byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    PName physicalName = PNameFactory.newName(unprefixedSchemaName);
    // Reformulate key based on correct data
    newBuf = MetaDataUtil.getViewIndexSequenceKey(tableName == null ? null
        : Bytes.toString(tableName), physicalName, nSaltBuckets).getKey();
  } else {
    newBuf = new byte[length + 1];
    System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length);
    newBuf[0] = SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length,
        nSaltBuckets);
  }
  return new KeyValue(newBuf, 0, newBuf.length,
      buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
      buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
      keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType()),
      buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
Example 10
Source Project: phoenix | File: ApplyAndFilterDeletesFilter.java | License: BSD 3-Clause "New" or "Revised" License

/**
 * @param next
 * @return
 */
public boolean matchesColumn(KeyValue next) {
  if (deleteColumn == null) {
    return false;
  }
  if (deleteColumn.matchingFamily(next) && deleteColumn.matchingQualifier(next)) {
    // falls within the timestamp range
    if (deleteColumn.getTimestamp() >= next.getTimestamp()) {
      return true;
    }
  } else {
    deleteColumn = null;
  }
  return false;
}
Example 11
Source Project: hbase | File: TestTags.java | License: Apache License 2.0

private void updateMutationAddingTags(final Mutation m) {
  byte[] attribute = m.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = CellUtil.cloneFamily(kv);
        }
        Tag tag = new ArrayBackedTag((byte) 1, attribute);
        List<Tag> tagList = new ArrayList<>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(),
            CellUtil.cloneFamily(kv), 0, kv.getFamilyLength(),
            CellUtil.cloneQualifier(kv), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getTypeByte()),
            CellUtil.cloneValue(kv), 0, kv.getValueLength(), tagList);
        ((List<Cell>) updatedCells).add(newKV);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
Example 12
Source Project: phoenix | File: Sequence.java | License: BSD 3-Clause "New" or "Revised" License

public SequenceValue(Result r) {
  KeyValue currentValueKV = getCurrentValueKV(r);
  KeyValue incrementByKV = getIncrementByKV(r);
  KeyValue cacheSizeKV = getCacheSizeKV(r);
  timestamp = currentValueKV.getTimestamp();
  nextValue = PDataType.LONG.getCodec().decodeLong(currentValueKV.getBuffer(),
      currentValueKV.getValueOffset(), null);
  incrementBy = PDataType.LONG.getCodec().decodeLong(incrementByKV.getBuffer(),
      incrementByKV.getValueOffset(), null);
  cacheSize = PDataType.INTEGER.getCodec().decodeInt(cacheSizeKV.getBuffer(),
      cacheSizeKV.getValueOffset(), null);
  currentValue = nextValue - incrementBy * cacheSize;
}
Example 13
Source Project: phoenix | File: ApplyAndFilterDeletesFilter.java | License: Apache License 2.0

/**
 * @param next
 * @return
 */
public boolean matchesColumn(KeyValue next) {
  if (deleteColumn == null) {
    return false;
  }
  if (CellUtil.matchingFamily(deleteColumn, next)
      && CellUtil.matchingQualifier(deleteColumn, next)) {
    // falls within the timestamp range
    if (deleteColumn.getTimestamp() >= next.getTimestamp()) {
      return true;
    }
  } else {
    deleteColumn = null;
  }
  return false;
}
Example 14
Source Project: phoenix | File: ColumnTrackingNextLargestTimestampFilter.java | License: BSD 3-Clause "New" or "Revised" License

@Override
public ReturnCode filterKeyValue(KeyValue v) {
  long timestamp = v.getTimestamp();
  if (timestamp > ts) {
    this.column.setTs(timestamp);
    return ReturnCode.SKIP;
  }
  return ReturnCode.INCLUDE;
}
Example 15
Source Project: phoenix | File: MaxTimestampFilter.java | License: BSD 3-Clause "New" or "Revised" License

@Override
public ReturnCode filterKeyValue(KeyValue v) {
  long timestamp = v.getTimestamp();
  if (timestamp > ts) {
    return ReturnCode.SEEK_NEXT_USING_HINT;
  }
  return ReturnCode.INCLUDE;
}
Example 16
Source Project: phoenix | File: ScanProjector.java | License: BSD 3-Clause "New" or "Revised" License

public ProjectedValueTuple projectResults(Tuple tuple) {
  byte[] bytesValue = schema.toBytes(tuple, expressions, valueSet, ptr);
  KeyValue base = tuple.getValue(0);
  return new ProjectedValueTuple(base.getBuffer(), base.getRowOffset(), base.getRowLength(),
      base.getTimestamp(), bytesValue, valueSet.getEstimatedLength());
}
Example 17
Source Project: hbase | File: TestDefaultMemStore.java | License: Apache License 2.0

/**
 * When we insert a higher-memstoreTS deletion of a cell but with
 * the same timestamp, we still need to provide consistent reads
 * for the same scanner.
 */
@Test
public void testMemstoreDeletesVisibilityWithSameKey() throws IOException {
  final byte[] row = Bytes.toBytes(1);
  final byte[] f = Bytes.toBytes("family");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] v1 = Bytes.toBytes("value1");
  // INSERT 1: Write both columns val1
  MultiVersionConcurrencyControl.WriteEntry w = mvcc.begin();
  KeyValue kv11 = new KeyValue(row, f, q1, v1);
  kv11.setSequenceId(w.getWriteNumber());
  memstore.add(kv11, null);

  KeyValue kv12 = new KeyValue(row, f, q2, v1);
  kv12.setSequenceId(w.getWriteNumber());
  memstore.add(kv12, null);
  mvcc.completeAndWait(w);

  // BEFORE STARTING INSERT 2, SEE FIRST KVS
  KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // START DELETE: Insert delete for one of the columns
  w = mvcc.begin();
  KeyValue kvDel = new KeyValue(row, f, q2, kv11.getTimestamp(), KeyValue.Type.DeleteColumn);
  kvDel.setSequenceId(w.getWriteNumber());
  memstore.add(kvDel, null);

  // BEFORE COMPLETING DELETE, SEE FIRST KVS
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kv12});

  // COMPLETE DELETE
  mvcc.completeAndWait(w);

  // NOW WE SHOULD SEE DELETE
  s = this.memstore.getScanners(mvcc.getReadPoint()).get(0);
  assertScannerResults(s, new KeyValue[]{kv11, kvDel, kv12});
}
Example 18
Source Project: hbase | File: TestBufferedDataBlockEncoder.java | License: Apache License 2.0

@Test
public void testKVCodecWithTagsForDecodedCellsWithNoTags() throws Exception {
  KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"));
  // kv1.getKey() return a copy of the Key bytes which starts from RK_length. Means from offsets,
  // we need to reduce the KL and VL parts.
  OnheapDecodedCell c1 = new OnheapDecodedCell(kv1.getKey(), kv1.getRowLength(),
      kv1.getFamilyOffset() - KeyValue.ROW_OFFSET, kv1.getFamilyLength(),
      kv1.getQualifierOffset() - KeyValue.ROW_OFFSET, kv1.getQualifierLength(),
      kv1.getTimestamp(), kv1.getTypeByte(), kv1.getValueArray(), kv1.getValueOffset(),
      kv1.getValueLength(), kv1.getSequenceId(), kv1.getTagsArray(), kv1.getTagsOffset(),
      kv1.getTagsLength());
  KeyValue kv2 = new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"), Bytes.toBytes("2"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"));
  OnheapDecodedCell c2 = new OnheapDecodedCell(kv2.getKey(), kv2.getRowLength(),
      kv2.getFamilyOffset() - KeyValue.ROW_OFFSET, kv2.getFamilyLength(),
      kv2.getQualifierOffset() - KeyValue.ROW_OFFSET, kv2.getQualifierLength(),
      kv2.getTimestamp(), kv2.getTypeByte(), kv2.getValueArray(), kv2.getValueOffset(),
      kv2.getValueLength(), kv2.getSequenceId(), kv2.getTagsArray(), kv2.getTagsOffset(),
      kv2.getTagsLength());
  KeyValue kv3 = new KeyValue(Bytes.toBytes("r3"), Bytes.toBytes("cf"), Bytes.toBytes("qual"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"));
  BufferedDataBlockEncoder.OffheapDecodedExtendedCell c3 =
      new BufferedDataBlockEncoder.OffheapDecodedExtendedCell(ByteBuffer.wrap(kv2.getKey()),
          kv2.getRowLength(), kv2.getFamilyOffset() - KeyValue.ROW_OFFSET, kv2.getFamilyLength(),
          kv2.getQualifierOffset() - KeyValue.ROW_OFFSET, kv2.getQualifierLength(),
          kv2.getTimestamp(), kv2.getTypeByte(), ByteBuffer.wrap(kv2.getValueArray()),
          kv2.getValueOffset(), kv2.getValueLength(), kv2.getSequenceId(),
          ByteBuffer.wrap(kv2.getTagsArray()), kv2.getTagsOffset(), kv2.getTagsLength());
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  KeyValueCodecWithTags codec = new KeyValueCodecWithTags();
  Encoder encoder = codec.getEncoder(os);
  encoder.write(c1);
  encoder.write(c2);
  encoder.write(c3);
  ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray());
  Decoder decoder = codec.getDecoder(is);
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c1, decoder.current()));
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c2, decoder.current()));
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c3, decoder.current()));
  assertFalse(decoder.advance());
}
Example 19
Source Project: hbase-secondary-index | File: TransactionState.java | License: GNU General Public License v3.0

KeyValue applyDeletes(final KeyValue kv, final long minTime, final long maxTime) {
  if (deletes.isEmpty()) {
    return kv;
  }
  for (Delete delete : deletes) {
    // Skip if delete should not apply
    if (!Bytes.equals(kv.getRow(), delete.getRow())
        || kv.getTimestamp() > delete.getTimeStamp()
        || delete.getTimeStamp() > maxTime
        || delete.getTimeStamp() < minTime) {
      continue;
    }

    // Whole-row delete
    if (delete.isEmpty()) {
      return null;
    }

    for (Entry<byte[], List<KeyValue>> deleteEntry : delete.getFamilyMap().entrySet()) {
      byte[] family = deleteEntry.getKey();
      if (!Bytes.equals(kv.getFamily(), family)) {
        continue;
      }
      List<KeyValue> familyDeletes = deleteEntry.getValue();
      if (familyDeletes == null) {
        return null;
      }
      for (KeyValue keyDeletes : familyDeletes) {
        byte[] deleteQualifier = keyDeletes.getQualifier();
        byte[] kvQualifier = kv.getQualifier();
        if (keyDeletes.getTimestamp() > kv.getTimestamp()
            && Bytes.equals(deleteQualifier, kvQualifier)) {
          return null;
        }
      }
    }
  }
  return kv;
}
Example 20
Source Project: phoenix | File: NewerTimestampFilter.java | License: BSD 3-Clause "New" or "Revised" License

@Override
public ReturnCode filterKeyValue(KeyValue ignored) {
  return ignored.getTimestamp() > timestamp ? ReturnCode.SKIP : ReturnCode.INCLUDE;
}
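A recurring pattern in the Phoenix delete-filter examples above (Examples 1, 7, 10, and 13) is comparing a delete marker's timestamp against a cell's timestamp to decide whether the marker covers the cell. The standalone helper below is a hedged summary of that pattern, not code taken from any of the listed projects; the class and method names are illustrative only.

import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;

final class DeleteCoverage {
  private DeleteCoverage() {
  }

  // A column delete marker covers a cell when family and qualifier match and the
  // marker's timestamp is greater than or equal to the cell's timestamp.
  static boolean columnDeleteCovers(KeyValue deleteMarker, KeyValue next) {
    return CellUtil.matchingFamily(deleteMarker, next)
        && CellUtil.matchingQualifier(deleteMarker, next)
        && deleteMarker.getTimestamp() >= next.getTimestamp();
  }

  // A point delete additionally requires an exact timestamp match.
  static boolean pointDeleteCovers(KeyValue deleteMarker, KeyValue next) {
    return CellUtil.matchingFamily(deleteMarker, next)
        && CellUtil.matchingQualifier(deleteMarker, next)
        && deleteMarker.getTimestamp() == next.getTimestamp();
  }
}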