Java Code Examples for org.apache.hadoop.hbase.KeyValue#getRowOffset()

The following examples show how to use org.apache.hadoop.hbase.KeyValue#getRowOffset(). These examples are extracted from open source projects. You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
@BeforeClass
public static void setup() throws Exception {
    // Populate the test table with a base triple plus every hash-conflicting
    // S/P/O combination, all written with the same timestamp.
    try (HTable table = HalyardTableUtils.getTable(HBaseServerTestInstance.getInstanceConfig(), "testConflictingHash", true, 0)) {
        long ts = System.currentTimeMillis();
        KeyValue[] baseKvs = HalyardTableUtils.toKeyValues(SUBJ, PRED, OBJ, null, false, ts);
        KeyValue[][] conflictKvs = new KeyValue[][] {
            HalyardTableUtils.toKeyValues(SUBJ, PRED, CONF, null, false, ts),
            HalyardTableUtils.toKeyValues(SUBJ, CONF,  OBJ, null, false, ts),
            HalyardTableUtils.toKeyValues(SUBJ, CONF, CONF, null, false, ts),
            HalyardTableUtils.toKeyValues(CONF, PRED,  OBJ, null, false, ts),
            HalyardTableUtils.toKeyValues(CONF, PRED, CONF, null, false, ts),
            HalyardTableUtils.toKeyValues(CONF, CONF,  OBJ, null, false, ts),
            HalyardTableUtils.toKeyValues(CONF, CONF, CONF, null, false, ts),
        };
        for (int idx = 0; idx < baseKvs.length; idx++) {
            KeyValue base = baseKvs[idx];
            table.put(new Put(base.getRowArray(), base.getRowOffset(), base.getRowLength(), base.getTimestamp()).add(base));
            // Re-key each conflicting cell onto the base cell's row and family
            // so it lands in the same HBase row as the original triple.
            for (KeyValue[] conflictSet : conflictKvs) {
                KeyValue conflict = conflictSet[idx];
                KeyValue rekeyed = new KeyValue(base.getRowArray(), base.getRowOffset(), base.getRowLength(),
                        base.getFamilyArray(), base.getFamilyOffset(), base.getFamilyLength(),
                        conflict.getQualifierArray(), conflict.getQualifierOffset(), conflict.getQualifierLength(),
                        base.getTimestamp(), KeyValue.Type.Put,
                        conflict.getValueArray(), conflict.getValueOffset(), conflict.getValueLength());
                table.put(new Put(rekeyed.getRowArray(), rekeyed.getRowOffset(), rekeyed.getRowLength(), rekeyed.getTimestamp()).add(rekeyed));
            }
        }
        table.flushCommits();
    }
    sail = new HBaseSail(HBaseServerTestInstance.getInstanceConfig(), "testConflictingHash", false, 0, true, 0, null, null);
    sail.initialize();
}
 
Example 2
Source Project: phoenix   File: CoveredColumnIndexer.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
    Collection<KeyValue> filtered) throws IOException {

  // Accumulates every index update produced below, keyed per mutation.
  IndexUpdateManager updateMap = new IndexUpdateManager();

  // Group the filtered cells by row so each row is handled as one unit.
  for (Batch rowBatch : batchByRow(filtered)) {
    KeyValue firstCell = rowBatch.getKvs().iterator().next();
    Put put = new Put(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength());
    for (KeyValue cell : rowBatch.getKvs()) {
      // Only Put-type cells need cleanup; skip every other cell type.
      Type cellType = KeyValue.Type.codeToType(cell.getTypeByte());
      if (cellType.equals(Type.Put)) {
        // add the cell independently
        put.add(cell);
      }
    }

    // Treat the rebuilt Put the same way deletes are treated: slice it into
    // timestamp batches and collect the delete-side index updates per slice.
    Collection<Batch> timestampBatches = createTimestampBatchesFromMutation(put);
    LocalTableState state = new LocalTableState(env, localTable, put);
    for (Batch tsBatch : timestampBatches) {
      // The table state already holds all the future state; just move the clock.
      state.setCurrentTimestamp(tsBatch.getTimestamp());
      this.addDeleteUpdatesToMap(updateMap, state, tsBatch.getTimestamp());
    }
  }
  return updateMap.toMap();
}
 
Example 3
Source Project: phoenix   File: UpgradeUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Rewrites a sequence-table KeyValue for the salted/renamed layout.
 * <p>
 * For view-index sequence rows (detected by the {@code SEQ_PREFIX_BYTES}
 * prefix) the row key is reformulated with the corrected sequence name;
 * otherwise a salt byte is prepended when {@code nSaltBuckets > 0}.
 *
 * @param keyValue the original sequence KeyValue
 * @param nSaltBuckets number of salt buckets; 0 means no salting
 * @return a new KeyValue with the rewritten row key, or {@code null} when
 *         no rewrite is needed (not a view sequence and no salting)
 */
@SuppressWarnings("deprecation")
private static KeyValue addSaltByte(KeyValue keyValue, int nSaltBuckets) {
    byte[] buf = keyValue.getBuffer();
    int length = keyValue.getRowLength();
    int offset = keyValue.getRowOffset();
    boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0;
    if (!isViewSeq && nSaltBuckets == 0) {
        return null;
    }
    byte[] newBuf;
    if (isViewSeq) { // We messed up the name for the sequences for view indexes so we'll take this opportunity to fix it
        // FIX: the row bytes start at `offset` within the backing buffer, so the
        // last row byte is buf[offset + length - 1]; buf[length - 1] would read
        // from the KeyValue header region instead of the row key.
        if (buf[offset + length - 1] == 0) { // Global indexes on views have trailing null byte
            length--;
        }
        byte[][] rowKeyMetaData = new byte[3][];
        SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData);
        byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        // Strip the view-index prefix from the schema name to recover the
        // physical table name.
        byte[] unprefixedSchemaName = new byte[schemaName.length - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length];
        System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length, unprefixedSchemaName, 0, unprefixedSchemaName.length);
        byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        PName physicalName = PNameFactory.newName(unprefixedSchemaName);
        // Reformulate key based on correct data
        newBuf = MetaDataUtil.getViewIndexSequenceKey(tableName == null ? null : Bytes.toString(tableName), physicalName, nSaltBuckets).getKey();
    } else {
        // Prepend a single salt byte computed over the original row key.
        newBuf = new byte[length + 1];
        System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length);
        newBuf[0] = SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length, nSaltBuckets);
    }
    // Family/qualifier/value are carried over unchanged from the original cell.
    return new KeyValue(newBuf, 0, newBuf.length,
            buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
            buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
            keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType()),
            buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
 
Example 4
/**
 * Binary-searches for the latest value of a column without allocating memory
 * in the process.
 * @param kvs KeyValues for a single row, in KeyValue sort order
 * @param family column family to look up
 * @param qualifier column qualifier to look up
 */
public static KeyValue getColumnLatest(List<KeyValue> kvs, byte[] family, byte[] qualifier) {
    if (kvs.isEmpty()) {
        return null;
    }
    // Build a comparator anchored on the row key of the first cell.
    KeyValue firstCell = kvs.get(0);
    Comparator<KeyValue> searchComp = new SearchComparator(
            firstCell.getBuffer(), firstCell.getRowOffset(), firstCell.getRowLength(), family, qualifier);
    // binarySearch returns (-(insertion point) - 1); the probe key never
    // matches a real cell exactly, so the result is always negative.
    int idx = Collections.binarySearch(kvs, null, searchComp);
    if (idx < 0) {
        idx = -(idx + 1); // idx is now the insertion point
    }
    if (idx == kvs.size()) {
        return null; // past the end: the column doesn't exist
    }

    // Confirm the cell at the insertion point really carries the requested
    // family and qualifier; otherwise the column is absent.
    KeyValue candidate = kvs.get(idx);
    if (Bytes.compareTo(candidate.getBuffer(), candidate.getFamilyOffset(), candidate.getFamilyLength(),
            family, 0, family.length) != 0) {
        return null;
    }
    if (Bytes.compareTo(candidate.getBuffer(), candidate.getQualifierOffset(), candidate.getQualifierLength(),
            qualifier, 0, qualifier.length) != 0) {
        return null;
    }
    return candidate;
}
 
Example 5
public ProjectedValueTuple projectResults(Tuple tuple) {
	// Serialize the projected expressions first (toBytes may update `ptr`),
	// then key the result on the first cell's row and timestamp.
	byte[] serialized = schema.toBytes(tuple, expressions, valueSet, ptr);
	KeyValue firstCell = tuple.getValue(0);
	return new ProjectedValueTuple(firstCell.getBuffer(), firstCell.getRowOffset(),
			firstCell.getRowLength(), firstCell.getTimestamp(), serialized, valueSet.getEstimatedLength());
}