Java Code Examples for org.apache.hadoop.hbase.KeyValue#getValueLength()

The following examples show how to use org.apache.hadoop.hbase.KeyValue#getValueLength(). Each example is drawn from an open-source project; the Source File reference above each one names the original project and file.
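Before turning to the project examples, here is a minimal, self-contained sketch of what the method reports (the class name GetValueLengthSketch and the literal row/family/qualifier/value are ours, purely for illustration): getValueLength() returns the length in bytes of the value portion of a cell, and is typically paired with getValueOffset() and the backing array to read the value without copying the whole KeyValue.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class GetValueLengthSketch {
    public static void main(String[] args) {
        // Build a cell whose value is the 5-byte string "hello".
        KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
                Bytes.toBytes("q"), Bytes.toBytes("hello"));
        // getValueLength() measures only the value slice of the backing array;
        // pair it with getValueOffset() to extract the value bytes.
        byte[] value = Bytes.copy(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
        System.out.println(kv.getValueLength() + " -> " + Bytes.toString(value)); // 5 -> hello
    }
}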
Example 1
Source File: IndexManagementUtil.java    From phoenix with BSD 3-Clause "New" or "Revised" License
public static ValueGetter createGetterFromKeyValues(Collection<KeyValue> pendingUpdates) {
    final Map<ReferencingColumn, ImmutableBytesPtr> valueMap = Maps.newHashMapWithExpectedSize(pendingUpdates
            .size());
    for (KeyValue kv : pendingUpdates) {
        // create new pointers to each part of the kv
        ImmutableBytesPtr family = new ImmutableBytesPtr(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength());
        ImmutableBytesPtr qual = new ImmutableBytesPtr(kv.getBuffer(), kv.getQualifierOffset(),
                kv.getQualifierLength());
        ImmutableBytesPtr value = new ImmutableBytesPtr(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
        valueMap.put(new ReferencingColumn(family, qual), value);
    }
    return new ValueGetter() {
        @Override
        public ImmutableBytesPtr getLatestValue(ColumnReference ref) throws IOException {
            return valueMap.get(ReferencingColumn.wrap(ref));
        }
    };
}
 
Example 2
Source File: SchemaUtil.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private static KeyValue upgradeTo3(KeyValue keyValue) {
    byte[] buf = keyValue.getBuffer();
    int newLength = keyValue.getRowLength() + 1;
    byte[] newKey = new byte[newLength];
    newKey[0] = QueryConstants.SEPARATOR_BYTE;
    System.arraycopy(buf, keyValue.getRowOffset(), newKey, 1, keyValue.getRowLength());
    byte[] valueBuf = updateValueIfNecessary(keyValue);
    int valueOffset = keyValue.getValueOffset();
    int valueLength = keyValue.getValueLength();
    if (valueBuf != buf) {
        valueOffset = 0;
        valueLength = valueBuf.length;
    }
    return new KeyValue(newKey, 0, newLength,
            buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
            buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
            keyValue.getTimestamp(), Type.codeToType(keyValue.getType()),
            valueBuf, valueOffset, valueLength);
}
 
Example 3
Source File: Sequence.java    From phoenix with BSD 3-Clause "New" or "Revised" License
public boolean returnValue(Result result) throws SQLException {
    KeyValue statusKV = result.raw()[0];
    if (statusKV.getValueLength() == 0) { // No error, but unable to return sequence values
        return false;
    }
    long timestamp = statusKV.getTimestamp();
    int statusCode = PDataType.INTEGER.getCodec().decodeInt(statusKV.getBuffer(), statusKV.getValueOffset(), null);
    if (statusCode == SUCCESS) {  // Success - update nextValue down to currentValue
        SequenceValue value = findSequenceValue(timestamp);
        if (value == null) {
            throw new EmptySequenceCacheException(key.getSchemaName(),key.getSequenceName());
        }
        value.nextValue = value.currentValue;
        return true;
    }
    SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode);
    // TODO: We could have the server return the timestamps of the
    // delete markers and we could insert them here, but this seems
    // like overkill.
    // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) {
    // }
    throw new SQLExceptionInfo.Builder(code)
        .setSchemaName(key.getSchemaName())
        .setTableName(key.getSequenceName())
        .build().buildException();
}
 
Example 4
Source File: UpgradeUtil.java    From phoenix with Apache License 2.0
@SuppressWarnings("deprecation")
private static KeyValue addSaltByte(KeyValue keyValue, int nSaltBuckets) {
    byte[] buf = keyValue.getBuffer();
    int length = keyValue.getRowLength();
    int offset = keyValue.getRowOffset();
    boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0;
    if (!isViewSeq && nSaltBuckets == 0) {
        return null;
    }
    byte[] newBuf;
    if (isViewSeq) { // We messed up the name for the sequences for view indexes so we'll take this opportunity to fix it
        if (buf[length-1] == 0) { // Global indexes on views have trailing null byte
            length--;
        }
        byte[][] rowKeyMetaData = new byte[3][];
        SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData);
        byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        byte[] unprefixedSchemaName = new byte[schemaName.length - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length];
        System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length, unprefixedSchemaName, 0, unprefixedSchemaName.length);
        byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        PName physicalName = PNameFactory.newName(unprefixedSchemaName);
        // Reformulate key based on correct data
        newBuf = MetaDataUtil.getViewIndexSequenceKey(tableName == null ? null : Bytes.toString(tableName), physicalName, nSaltBuckets).getKey();
    } else {
        newBuf = new byte[length + 1];
        System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length);
        newBuf[0] = SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length, nSaltBuckets);
    }
    return new KeyValue(newBuf, 0, newBuf.length,
            buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
            buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
            keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType()),
            buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
 
Example 5
Source File: TestReplicationWithTags.java    From hbase with Apache License 2.0
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  byte[] attribute = put.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<>();
  if (attribute != null) {
    for (List<? extends Cell> edits : put.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = CellUtil.cloneFamily(kv);
        }
        Tag tag = new ArrayBackedTag(TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<>(1);
        tagList.add(tag);

        KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(),
            CellUtil.cloneFamily(kv), 0, kv.getFamilyLength(), CellUtil.cloneQualifier(kv), 0,
            kv.getQualifierLength(), kv.getTimestamp(),
            KeyValue.Type.codeToType(kv.getTypeByte()), CellUtil.cloneValue(kv), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newKV);
      }
    }
    put.getFamilyCellMap().remove(cf);
    // Update the family map
    put.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Example 6
Source File: TestTags.java    From hbase with Apache License 2.0
private void updateMutationAddingTags(final Mutation m) {
  byte[] attribute = m.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = CellUtil.cloneFamily(kv);
        }
        Tag tag = new ArrayBackedTag((byte) 1, attribute);
        List<Tag> tagList = new ArrayList<>();
        tagList.add(tag);

        KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(),
            CellUtil.cloneFamily(kv), 0, kv.getFamilyLength(), CellUtil.cloneQualifier(kv), 0,
            kv.getQualifierLength(), kv.getTimestamp(),
            KeyValue.Type.codeToType(kv.getTypeByte()), CellUtil.cloneValue(kv), 0,
            kv.getValueLength(), tagList);
        updatedCells.add(newKV);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
 
Example 7
Source File: LazyValueGetter.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Fetch the first value on the scanner for the given column.
 * @param ref the column (family and qualifier) to look up
 * @return the first value on the scanner for the given column, or null if there is none
 */
private ImmutableBytesPtr get(ColumnReference ref) throws IOException {
  KeyValue first = ref.getFirstKeyValueForRow(row);
  if (!scan.seek(first)) {
    return null;
  }
  // there is a next value - we only care about the current value, so we can just snag that
  KeyValue next = scan.next();
  if (ref.matches(next)) {
    return new ImmutableBytesPtr(next.getBuffer(), next.getValueOffset(), next.getValueLength());
  }
  return null;
}
 
Example 8
Source File: DataBlockEncodingTool.java    From hbase with Apache License 2.0
/**
 * Check statistics for given HFile for different data block encoders.
 * @param scanner Of file which will be compressed.
 * @param kvLimit Maximal count of KeyValue which will be processed.
 * @throws IOException thrown if scanner is invalid
 */
public void checkStatistics(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  scanner.seek(KeyValue.LOWESTKEY);

  KeyValue currentKV;

  byte[] previousKey = null;
  byte[] currentKey;

  DataBlockEncoding[] encodings = DataBlockEncoding.values();

  ByteArrayOutputStream uncompressedOutputStream =
      new ByteArrayOutputStream();

  int j = 0;
  while ((currentKV = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    j++;
    currentKey = currentKV.getKey();
    if (previousKey != null) {
      for (int i = 0; i < previousKey.length && i < currentKey.length &&
          previousKey[i] == currentKey[i]; ++i) {
        totalKeyRedundancyLength++;
      }
    }

    // Append a zero tagsLen to cells that don't carry tags. When the scanner
    // converts the byte array to a KeyValue, it drops the tagsLen part if it is
    // zero, but we still need that part to tell whether the current cell carries
    // tags: if USE_TAG is true the HFile contains tagged cells, and a cell whose
    // own tagsLen is 0 may sit alongside cells that do have tags.
    if (USE_TAG && currentKV.getTagsLength() == 0) {
      uncompressedOutputStream.write(currentKV.getBuffer(),
          currentKV.getOffset(), currentKV.getLength());
      // write tagsLen = 0.
      uncompressedOutputStream.write(Bytes.toBytes((short) 0));
    } else {
      uncompressedOutputStream.write(currentKV.getBuffer(),
          currentKV.getOffset(), currentKV.getLength());
    }

    if(includesMemstoreTS) {
      WritableUtils.writeVLong(
          new DataOutputStream(uncompressedOutputStream), currentKV.getSequenceId());
    }

    previousKey = currentKey;

    int kLen = currentKV.getKeyLength();
    int vLen = currentKV.getValueLength();
    int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset());
    int restLen = currentKV.getLength() - kLen - vLen;

    totalKeyLength += kLen;
    totalValueLength += vLen;
    totalPrefixLength += restLen;
    totalCFLength += cfLen;
  }

  rawKVs = uncompressedOutputStream.toByteArray();
  for (DataBlockEncoding encoding : encodings) {
    if (encoding == DataBlockEncoding.NONE) {
      continue;
    }
    DataBlockEncoder d = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder()
        .withDataBlockEncoding(encoding)
        .withCompression(Compression.Algorithm.NONE)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(USE_TAG).build();
    codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta));
  }
}
 
Example 9
Source File: TestHFileSeek.java    From hbase with Apache License 2.0
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    HFileContext context = new HFileContextBuilder()
                          .withBlockSize(options.minBlockSize)
                          .withCompression(HFileWriterImpl.compressionByName(options.compress))
                          .build();
    Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withOutputStream(fout)
        .withFileContext(context)
        .create();
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        byte [] k = new byte [key.getLength()];
        System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
        byte [] v = new byte [val.getLength()];
        // Copy val.getLength() bytes (not key.getLength()) to match v's size.
        System.arraycopy(val.getBytes(), 0, v, 0, val.getLength());
        KeyValue kv = new KeyValue(k, CF, QUAL, v);
        writer.append(kv);
        totalBytes += kv.getKeyLength();
        totalBytes += kv.getValueLength();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double)timer.read()/1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
 
Example 10
Source File: TestBufferedDataBlockEncoder.java    From hbase with Apache License 2.0
@Test
public void testKVCodecWithTagsForDecodedCellsWithNoTags() throws Exception {
  KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("1"));
  // kv1.getKey() returns a copy of the key bytes starting at RK_length, so offsets
  // taken from the full KeyValue must be reduced by the KL and VL parts (ROW_OFFSET).
  OnheapDecodedCell c1 = new OnheapDecodedCell(kv1.getKey(), kv1.getRowLength(),
      kv1.getFamilyOffset() - KeyValue.ROW_OFFSET, kv1.getFamilyLength(),
      kv1.getQualifierOffset() - KeyValue.ROW_OFFSET, kv1.getQualifierLength(),
      kv1.getTimestamp(), kv1.getTypeByte(), kv1.getValueArray(), kv1.getValueOffset(),
      kv1.getValueLength(), kv1.getSequenceId(), kv1.getTagsArray(), kv1.getTagsOffset(),
      kv1.getTagsLength());
  KeyValue kv2 = new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("f"), Bytes.toBytes("2"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("2"));
  OnheapDecodedCell c2 = new OnheapDecodedCell(kv2.getKey(), kv2.getRowLength(),
      kv2.getFamilyOffset() - KeyValue.ROW_OFFSET, kv2.getFamilyLength(),
      kv2.getQualifierOffset() - KeyValue.ROW_OFFSET, kv2.getQualifierLength(),
      kv2.getTimestamp(), kv2.getTypeByte(), kv2.getValueArray(), kv2.getValueOffset(),
      kv2.getValueLength(), kv2.getSequenceId(), kv2.getTagsArray(), kv2.getTagsOffset(),
      kv2.getTagsLength());
  KeyValue kv3 = new KeyValue(Bytes.toBytes("r3"), Bytes.toBytes("cf"), Bytes.toBytes("qual"),
      HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3"));
  // Build c3 from kv3 (the original used kv2 here, leaving kv3 unused).
  BufferedDataBlockEncoder.OffheapDecodedExtendedCell
      c3 = new BufferedDataBlockEncoder.OffheapDecodedExtendedCell(ByteBuffer.wrap(kv3.getKey()),
      kv3.getRowLength(), kv3.getFamilyOffset() - KeyValue.ROW_OFFSET, kv3.getFamilyLength(),
      kv3.getQualifierOffset() - KeyValue.ROW_OFFSET, kv3.getQualifierLength(),
      kv3.getTimestamp(), kv3.getTypeByte(), ByteBuffer.wrap(kv3.getValueArray()),
      kv3.getValueOffset(), kv3.getValueLength(), kv3.getSequenceId(),
      ByteBuffer.wrap(kv3.getTagsArray()), kv3.getTagsOffset(), kv3.getTagsLength());
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  KeyValueCodecWithTags codec = new KeyValueCodecWithTags();
  Encoder encoder = codec.getEncoder(os);
  encoder.write(c1);
  encoder.write(c2);
  encoder.write(c3);
  ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray());
  Decoder decoder = codec.getDecoder(is);
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c1, decoder.current()));
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c2, decoder.current()));
  assertTrue(decoder.advance());
  assertTrue(CellUtil.equals(c3, decoder.current()));
  assertFalse(decoder.advance());
}
 
Example 11
Source File: IndexUtil.java    From phoenix with BSD 3-Clause "New" or "Revised" License
public static List<Mutation> generateIndexData(final PTable table, PTable index,
        List<Mutation> dataMutations, ImmutableBytesWritable ptr, KeyValueBuilder builder)
        throws SQLException {
    try {
        IndexMaintainer maintainer = index.getIndexMaintainer(table);
        maintainer.setKvBuilder(builder);
        List<Mutation> indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size());
        for (final Mutation dataMutation : dataMutations) {
            long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
            ptr.set(dataMutation.getRow());
            if (dataMutation instanceof Put) {
                // TODO: is this more efficient than looking in our mutation map
                // using the key plus finding the PColumn?
                ValueGetter valueGetter = new ValueGetter() {
    
                    @Override
                    public ImmutableBytesPtr getLatestValue(ColumnReference ref) {
                        // Always return null for our empty key value, as this will cause the index
                        // maintainer to always treat this Put as a new row.
                        if (isEmptyKeyValue(table, ref)) {
                            return null;
                        }
                        Map<byte [], List<KeyValue>> familyMap = dataMutation.getFamilyMap();
                        byte[] family = ref.getFamily();
                        List<KeyValue> kvs = familyMap.get(family);
                        if (kvs == null) {
                            return null;
                        }
                        byte[] qualifier = ref.getQualifier();
                        for (KeyValue kv : kvs) {
                            if (Bytes.compareTo(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(), family, 0, family.length) == 0 &&
                                Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                                return new ImmutableBytesPtr(kv.getBuffer(), kv.getValueOffset(), kv.getValueLength());
                            }
                        }
                        return null;
                    }
                    
                };
                indexMutations.add(maintainer.buildUpdateMutation(valueGetter, ptr, ts));
            } else {
                if (!maintainer.getIndexedColumns().isEmpty()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_DELETE_IF_IMMUTABLE_INDEX).setSchemaName(table.getSchemaName().getString())
                    .setTableName(table.getTableName().getString()).build().buildException();
                }
                indexMutations.add(maintainer.buildDeleteMutation(ptr, ts));
            }
        }
        return indexMutations;
    } catch (IOException e) {
        throw new SQLException(e);
    }
}