Java Code Examples for org.apache.hadoop.hbase.KeyValue#getType()

The following examples show how to use org.apache.hadoop.hbase.KeyValue#getType(). Each example notes the project and license it comes from, and you can go to the original project or source file by following the links above each example.
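Before the project examples, here is a minimal, self-contained sketch of the API itself (the class name GetTypeDemo is ours, for illustration): getType() returns the raw type byte stored in the KeyValue, and the static KeyValue.Type.codeToType(byte) decodes it back into the Type enum.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class GetTypeDemo {
    public static void main(String[] args) {
        // Build a Put-typed KeyValue and read its type code back.
        KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
                Bytes.toBytes("q"), System.currentTimeMillis(), KeyValue.Type.Put);
        byte code = kv.getType();                         // raw type byte
        KeyValue.Type t = KeyValue.Type.codeToType(code); // decoded enum
        System.out.println(t);                            // prints "Put"
    }
}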
Example 1
Source File: Batch.java    From phoenix with Apache License 2.0
@SuppressWarnings("deprecation")
public void add(KeyValue kv) {
  // The batch stops counting as all point deletes as soon as any
  // KeyValue with a different type code is added.
  if (pointDeleteCode != kv.getType()) {
    allPointDeletes = false;
  }
  batch.add(kv);
}
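The excerpt does not show the fields it touches; a plausible reconstruction (our assumption, not the verbatim Phoenix source) is:

private static final byte pointDeleteCode = KeyValue.Type.Delete.getCode();
private final List<KeyValue> batch = new ArrayList<KeyValue>();
private boolean allPointDeletes = true;

With these fields, allPointDeletes stays true only while every KeyValue added carries the plain Delete (point delete) type code.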
 
Example 2
Source File: CoveredColumnIndexer.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public Collection<Pair<Mutation, byte[]>> getIndexUpdateForFilteredRows(
    Collection<KeyValue> filtered) throws IOException {

  // stores all the return values
  IndexUpdateManager updateMap = new IndexUpdateManager();
  // batch the updates by row to make life easier and ordered
  Collection<Batch> batches = batchByRow(filtered);

  for (Batch batch : batches) {
    Put p = new Put(batch.getKvs().iterator().next().getRow());
    for (KeyValue kv : batch.getKvs()) {
      // we only need to clean up Put entries
      byte type = kv.getType();
      Type t = KeyValue.Type.codeToType(type);
      if (!t.equals(Type.Put)) {
        continue;
      }

      // add the kv independently
      p.add(kv);
    }

    // do the usual thing as for deletes
    Collection<Batch> timeBatch = createTimestampBatchesFromMutation(p);
    LocalTableState state = new LocalTableState(env, localTable, p);
    for (Batch entry : timeBatch) {
      // just set the timestamp on the table - it already has all the future state
      state.setCurrentTimestamp(entry.getTimestamp());
      this.addDeleteUpdatesToMap(updateMap, state, entry.getTimestamp());
    }
  }
  return updateMap.toMap();
}
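This example relies on batchByRow(...) to group the filtered KeyValues by row before building updates. The real Phoenix implementation is not shown here; a hedged sketch of what such a grouping step could look like (the Batch(long) constructor is our assumption):

private Collection<Batch> batchByRow(Collection<KeyValue> filtered) {
  // Sort batches by row key so iteration order is deterministic.
  Map<byte[], Batch> batches = new TreeMap<byte[], Batch>(Bytes.BYTES_COMPARATOR);
  for (KeyValue kv : filtered) {
    Batch batch = batches.get(kv.getRow());
    if (batch == null) {
      batch = new Batch(kv.getTimestamp()); // assumed constructor
      batches.put(kv.getRow(), batch);
    }
    batch.add(kv);
  }
  return batches.values();
}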
 
Example 3
Source File: IndexMaintainer.java    From phoenix with BSD 3-Clause "New" or "Revised" License
public boolean isRowDeleted(Collection<KeyValue> pendingUpdates) {
    int nDeleteCF = 0;
    for (KeyValue kv : pendingUpdates) {
        if (kv.getType() == KeyValue.Type.DeleteFamily.getCode()) {
            nDeleteCF++;
            boolean isEmptyCF = Bytes.compareTo(kv.getFamily(), dataEmptyKeyValueCF) == 0;
            // This is what a delete looks like on the client side for immutable indexing...
            if (isEmptyCF) {
                return true;
            }
        }
    }
    // This is what a delete looks like on the server side for mutable indexing...
    return nDeleteCF == this.nDataCFs;
}
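For context, the client-side row delete that this method detects arrives as a DeleteFamily marker on the empty column family. A hedged illustration of building such a marker (the variable names are ours; dataEmptyKeyValueCF is the field from the example above):

KeyValue deleteCF = new KeyValue(row, dataEmptyKeyValueCF, null,
        HConstants.LATEST_TIMESTAMP, KeyValue.Type.DeleteFamily);
// getType() returns the raw code, so the comparison in isRowDeleted matches:
assert deleteCF.getType() == KeyValue.Type.DeleteFamily.getCode();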
 
Example 4
Source File: SingleVersionDeleteNotSupported.java    From hbase-secondary-index with GNU General Public License v3.0
/**
 * Validate that a single-version delete is not used. Having to do this for now because the transactional delete
 * mechanism currently treats DeleteColumn the same as Delete, which could cause confusion.
 */
public static void validateDelete(final Delete delete) throws SingleVersionDeleteNotSupported {
    Collection<List<KeyValue>> values = delete.getFamilyMap().values();
    for (List<KeyValue> value : values) {
        for (KeyValue kv : value) {
            if (Type.Delete.getCode() == kv.getType()) {
                throw new SingleVersionDeleteNotSupported();
            }
        }
    }
}
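A hedged usage sketch: in the old HBase client API, Delete.deleteColumn(family, qualifier) marks only the latest version (KeyValue.Type.Delete), which this check rejects, while Delete.deleteColumns(family, qualifier) emits a DeleteColumn marker and passes:

Delete single = new Delete(Bytes.toBytes("row1"));
single.deleteColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // Type.Delete
SingleVersionDeleteNotSupported.validateDelete(single);        // throws

Delete all = new Delete(Bytes.toBytes("row1"));
all.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("q"));    // Type.DeleteColumn
SingleVersionDeleteNotSupported.validateDelete(all);           // passes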
 
Example 5
Source File: Batch.java    From phoenix with BSD 3-Clause "New" or "Revised" License
public void add(KeyValue kv) {
  if (pointDeleteCode != kv.getType()) {
    allPointDeletes = false;
  }
  batch.add(kv);
}
 
Example 6
Source File: TransactionalRegion.java    From hbase-secondary-index with GNU General Public License v3.0
protected void doReconstructionLog(final Path oldCoreLogFile, final long minSeqId, final long maxSeqId,
        final Progressable reporter) throws UnsupportedEncodingException, IOException {

    Path trxPath = new Path(oldCoreLogFile.getParent(), THLog.HREGION_OLD_THLOGFILE_NAME);

    // We can ignore doing anything with the Trx Log table, it is
    // not-transactional.
    if (super.getTableDesc().getNameAsString().equals(HBaseBackedTransactionLogger.TABLE_NAME)) {
        return;
    }

    THLogRecoveryManager recoveryManager = new THLogRecoveryManager(this);
    Map<Long, WALEdit> commitedTransactionsById = recoveryManager.getCommitsFromLog(trxPath, minSeqId, reporter);

    if (commitedTransactionsById != null && commitedTransactionsById.size() > 0) {
        LOG.debug("found " + commitedTransactionsById.size() + " COMMITED transactions to recover.");

        for (Entry<Long, WALEdit> entry : commitedTransactionsById.entrySet()) {
            LOG.debug("Writing " + entry.getValue().size() + " updates for transaction " + entry.getKey());
            WALEdit b = entry.getValue();

            for (KeyValue kv : b.getKeyValues()) {
                // FIXME need to convert these into puts and deletes. Not sure this is
                // the right way.
                // Could probably combine multiple KV's into a single put/delete.
                // Also timestamps?
                if (kv.getType() == KeyValue.Type.Put.getCode()) {
                    // The Put must be anchored to the KeyValue's row, or add(kv) fails.
                    Put put = new Put(kv.getRow());
                    put.add(kv);
                    super.put(put);
                } else if (kv.isDelete()) {
                    Delete del = new Delete(kv.getRow());
                    if (kv.isDeleteFamily()) {
                        del.deleteFamily(kv.getFamily());
                    } else if (kv.isDeleteType()) {
                        del.deleteColumn(kv.getFamily(), kv.getQualifier());
                    }
                    // Apply the delete to the region; without this the
                    // reconstructed delete is lost.
                    super.delete(del, null, true);
                }

            }

        }

        LOG.debug("Flushing cache"); // We must trigger a cache flush,
        // otherwise we will would ignore the log on subsequent failure
        if (!super.flushcache()) {
            LOG.warn("Did not flush cache");
        }
    }
}
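The FIXME above suggests combining multiple KeyValues into a single put or delete per row. A hedged sketch of that consolidation for the Put case (our sketch against the same era of the HBase API, not the project's code; deletes would be grouped analogously):

Map<byte[], Put> putsByRow = new TreeMap<byte[], Put>(Bytes.BYTES_COMPARATOR);
for (KeyValue kv : b.getKeyValues()) {
    if (kv.getType() != KeyValue.Type.Put.getCode()) {
        continue; // only consolidating Puts in this sketch
    }
    Put put = putsByRow.get(kv.getRow());
    if (put == null) {
        put = new Put(kv.getRow());
        putsByRow.put(kv.getRow(), put);
    }
    put.add(kv); // each KeyValue keeps its own timestamp
}
for (Put put : putsByRow.values()) {
    super.put(put); // one region write per row instead of one per KeyValue
}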