Java Code Examples for org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()

The following examples show how to use org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass(). Each example is taken from an open-source project; the source file, project, and license are noted above the code. The common pattern is that a pre-hook performs (or deliberately suppresses) the operation itself and then calls bypass() so that the region server skips its default handling of that operation.
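Before the real-world examples, here is a minimal sketch of the pattern, assuming the HBase 2.x coprocessor API; the class name and the "skip-me" attribute key are illustrative, not taken from any project below. Note that recent HBase versions honor bypass() only in a subset of pre-hooks, so check the Javadoc of the hook you override.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.wal.WALEdit;

// Minimal sketch: silently drop every Put that carries a hypothetical
// "skip-me" attribute instead of writing it to the region.
public class DropMarkedPutsObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
      WALEdit edit, Durability durability) throws IOException {
    if (put.getAttribute("skip-me") != null) { // illustrative attribute name
      c.bypass(); // skip the default put handling; nothing is written
    }
  }
}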
Example 1
Source File: WriteHeavyIncrementObserver.java    From hbase with Apache License 2.0
@Override
public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment)
    throws IOException {
  byte[] row = increment.getRow();
  Put put = new Put(row);
  long ts = getUniqueTimestamp(row);
  for (Map.Entry<byte[], List<Cell>> entry : increment.getFamilyCellMap().entrySet()) {
    for (Cell cell : entry.getValue()) {
      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row)
          .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())
          .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(),
            cell.getQualifierLength())
          .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())
          .setType(Cell.Type.Put).setTimestamp(ts).build());
    }
  }
  c.getEnvironment().getRegion().put(put);
  c.bypass();
  return Result.EMPTY_RESULT;
}
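The getUniqueTimestamp(row) helper is not shown above. Its job in WriteHeavyIncrementObserver is to hand out a distinct, increasing timestamp per row so that concurrent increments never overwrite each other's cells; the read side (Example 5 below) then sums all versions. A minimal sketch of that idea, with an assumed stripe count and a simplified clock (the real helper's internals may differ):

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: rows are hashed into stripes; each stripe's counter is advanced
// to max(previous + 1, wall clock), so timestamps within a stripe never repeat.
public class UniqueTimestamps {
  private final AtomicLong[] lastTimestamps = new AtomicLong[64]; // assumed stripe count

  public UniqueTimestamps() {
    for (int i = 0; i < lastTimestamps.length; i++) {
      lastTimestamps[i] = new AtomicLong();
    }
  }

  public long getUniqueTimestamp(byte[] row) {
    int slot = (Bytes.hashCode(row) & Integer.MAX_VALUE) % lastTimestamps.length;
    return lastTimestamps[slot].updateAndGet(
        prev -> Math.max(prev + 1, System.currentTimeMillis()));
  }
}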
 
Example 2
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit,
                      Durability durability) throws IOException {
  // Translate deletes into our own delete tombstones.
  // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which
  // allows us to roll back the changes (with a real delete) if the transaction fails.

  // Deletes that are part of a transaction rollback do not need special handling.
  // They will never be rolled back, so are performed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }

  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);

  // Other deletes are client-initiated and need to be translated into our own tombstones
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  for (byte[] family : delete.getFamilyCellMap().keySet()) {
    List<Cell> familyCells = delete.getFamilyCellMap().get(family);
    if (isFamilyDelete(familyCells)) {
      deleteMarkers.add(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(),
                        HConstants.EMPTY_BYTE_ARRAY);
    } else {
      for (Cell cell : familyCells) {
        deleteMarkers.add(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(),
                          HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
      deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // skip normal delete handling
  e.bypass();
}
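The isFamilyDelete(familyCells) helper is also not shown. Judging from how it is used, it distinguishes a whole-family delete (a single family-delete marker cell) from column-level deletes; a plausible sketch under that assumption:

import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

// Sketch: a whole-family delete arrives as exactly one DeleteFamily marker cell.
private boolean isFamilyDelete(List<Cell> familyCells) {
  return familyCells.size() == 1 && CellUtil.isDeleteFamily(familyCells.get(0));
}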
 
Example 3
Source File: Indexer.java    From phoenix with Apache License 2.0
/**
 * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing
 * sets up the necessary locks and MVCC to allow an atomic update. The Increment is not a
 * real increment, though; it is really more of a Put. We translate the Increment into a
 * list of mutations, at most a single Put and a single Delete, representing the changes
 * produced by executing the ON DUPLICATE KEY clauses for this row.
 */
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> e,
        final Increment inc) throws IOException {
    long start = EnvironmentEdgeManager.currentTimeMillis();
    try {
        List<Mutation> mutations = this.builder.executeAtomicOp(inc);
        if (mutations == null) {
            return null;
        }

        // Causes the Increment to be ignored as we're committing the mutations
        // ourselves below.
        e.bypass();
        // ON DUPLICATE KEY IGNORE will return an empty list if the row already exists,
        // as no action is required in that case.
        if (!mutations.isEmpty()) {
            Region region = e.getEnvironment().getRegion();
            // Otherwise, submit the mutations directly here
            region.batchMutate(mutations.toArray(new Mutation[0]));
        }
        return Result.EMPTY_RESULT;
    } catch (Throwable t) {
        throw ServerUtil.createIOException(
                "Unable to process ON DUPLICATE IGNORE for " + 
                e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString() + 
                "(" + Bytes.toStringBinary(inc.getRow()) + ")", t);
    } finally {
        long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
        if (duration >= slowPreIncrementThreshold) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock",
                        duration, slowPreIncrementThreshold));
            }
            metricSource.incrementSlowDuplicateKeyCheckCalls();
        }
        metricSource.updateDuplicateKeyCheckTime(duration);
    }
}
 
Example 4
Source File: IndexRegionObserver.java    From phoenix with Apache License 2.0
/**
 * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing
 * sets up the necessary locks and MVCC to allow an atomic update. The Increment is not a
 * real increment, though; it is really more of a Put. We translate the Increment into a
 * list of mutations, at most a single Put and a single Delete, representing the changes
 * produced by executing the ON DUPLICATE KEY clauses for this row.
 */
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> e,
        final Increment inc) throws IOException {
    long start = EnvironmentEdgeManager.currentTimeMillis();
    try {
        List<Mutation> mutations = this.builder.executeAtomicOp(inc);
        if (mutations == null) {
            return null;
        }

        // Causes the Increment to be ignored as we're committing the mutations
        // ourselves below.
        e.bypass();
        // ON DUPLICATE KEY IGNORE will return an empty list if the row already exists,
        // as no action is required in that case.
        if (!mutations.isEmpty()) {
            Region region = e.getEnvironment().getRegion();
            // Otherwise, submit the mutations directly here
            region.batchMutate(mutations.toArray(new Mutation[0]));
        }
        return Result.EMPTY_RESULT;
    } catch (Throwable t) {
        throw ServerUtil.createIOException(
                "Unable to process ON DUPLICATE IGNORE for " + 
                e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString() + 
                "(" + Bytes.toStringBinary(inc.getRow()) + ")", t);
    } finally {
        long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
        if (duration >= slowPreIncrementThreshold) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
            }
            metricSource.incrementSlowDuplicateKeyCheckCalls();
        }
        metricSource.updateDuplicateKeyCheckTime(duration);
    }
}
 
Example 5
Source File: WriteHeavyIncrementObserver.java    From hbase with Apache License 2.0
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get, List<Cell> result)
    throws IOException {
  Scan scan =
      new Scan().withStartRow(get.getRow()).withStopRow(get.getRow(), true).readAllVersions();
  NavigableMap<byte[], NavigableMap<byte[], MutableLong>> sums =
      new TreeMap<>(Bytes.BYTES_COMPARATOR);
  get.getFamilyMap().forEach((cf, cqs) -> {
    NavigableMap<byte[], MutableLong> ss = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    sums.put(cf, ss);
    cqs.forEach(cq -> {
      ss.put(cq, new MutableLong(0));
      scan.addColumn(cf, cq);
    });
  });
  List<Cell> cells = new ArrayList<>();
  try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
    boolean moreRows;
    do {
      moreRows = scanner.next(cells);
      for (Cell cell : cells) {
        byte[] family = CellUtil.cloneFamily(cell);
        byte[] qualifier = CellUtil.cloneQualifier(cell);
        long value = Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
        sums.get(family).get(qualifier).add(value);
      }
      cells.clear();
    } while (moreRows);
  }
  sums.forEach((cf, m) -> m.forEach((cq, s) -> result
      .add(createCell(get.getRow(), cf, cq, HConstants.LATEST_TIMESTAMP, s.longValue()))));
  c.bypass();
}
 
Example 6
Source File: TestImportTSVWithOperationAttributes.java    From hbase with Apache License 2.0
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
    Durability durability) throws IOException {
  Region region = e.getEnvironment().getRegion();
  if (!region.getRegionInfo().isMetaRegion()
      && !region.getRegionInfo().getTable().isSystemTable()) {
    if (put.getAttribute(TEST_ATR_KEY) != null) {
      LOG.debug("allow any put to happen " + region.getRegionInfo().getRegionNameAsString());
    } else {
      e.bypass();
    }
  }
}
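For context, the attribute that gates this observer is set by the client on the Put itself. A hedged client-side sketch; the table name and the "test" attribute key are stand-ins for the test's actual constants (including whatever TEST_ATR_KEY resolves to):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AttributeGatedPuts {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t1"))) { // assumed table name
      Put accepted = new Put(Bytes.toBytes("row1"));
      accepted.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      // "test" stands in for the test's TEST_ATR_KEY constant.
      accepted.setAttribute("test", Bytes.toBytes("true"));
      table.put(accepted); // the observer sees the attribute and lets this through

      Put dropped = new Put(Bytes.toBytes("row2"));
      dropped.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(dropped); // no attribute: the observer calls bypass(), nothing lands
    }
  }
}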
 
Example 7
Source File: TestMasterReplication.java    From hbase with Apache License 2.0
@Override
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Get get, final List<Cell> result) throws IOException {
  if (get.getAttribute("count") != null) {
    result.clear();
    // order is important!
    result.add(new KeyValue(count, count, delete, Bytes.toBytes(nDelete)));
    result.add(new KeyValue(count, count, put, Bytes.toBytes(nCount)));
    c.bypass();
  }
}
 
Example 8
Source File: WriteSinkCoprocessor.java    From hbase with Apache License 2.0
@Override
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
                           final MiniBatchOperationInProgress<Mutation> miniBatchOp)
    throws IOException {
  if (ops.incrementAndGet() % 20000 == 0) {
    LOG.info("Wrote " + ops.get() + " times in region " + regionName);
  }

  for (int i = 0; i < miniBatchOp.size(); i++) {
    miniBatchOp.setOperationStatus(i,
        new OperationStatus(HConstants.OperationStatusCode.SUCCESS));
  }
  c.bypass();
}
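An observer like this only takes effect once it is attached to a table. A sketch of registering it at table-creation time with the HBase 2.x admin API; the table and family names are assumptions, and the coprocessor's package should be verified against your HBase version:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SinkTableSetup {
  // Create a table with WriteSinkCoprocessor attached; writes to it are
  // acknowledged as SUCCESS by the observer and then discarded via bypass().
  static void createSinkTable(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("sink_table"))            // assumed table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")) // assumed family
        .setCoprocessor("org.apache.hadoop.hbase.tool.WriteSinkCoprocessor")
        .build();
    admin.createTable(desc);
  }
}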
 
Example 9
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
@Override
public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete,
    WALEdit edit, Durability durability) throws IOException {
  // Translate deletes into our own delete tombstones.
  // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which
  // allows us to roll back the changes (with a real delete) if the transaction fails.

  // Deletes that are part of a transaction rollback do not need special handling.
  // They will never be rolled back, so are performed as normal HBase deletes.
  if (isRollbackOperation(delete)) {
    return;
  }

  Transaction tx = getFromOperation(delete);
  ensureValidTxLifetime(e.getEnvironment(), delete, tx);

  // Other deletes are client-initiated and need to be translated into our own tombstones
  // TODO: this should delegate to the DeleteStrategy implementation.
  Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp());
  for (byte[] family : delete.getFamilyCellMap().keySet()) {
    List<Cell> familyCells = delete.getFamilyCellMap().get(family);
    if (isFamilyDelete(familyCells)) {
      deleteMarkers.addColumn(family, TxConstants.FAMILY_DELETE_QUALIFIER,
        familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY);
    } else {
      for (Cell cell : familyCells) {
        deleteMarkers.addColumn(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(),
          HConstants.EMPTY_BYTE_ARRAY);
      }
    }
  }
  for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
    deleteMarkers.setAttribute(entry.getKey(), entry.getValue());
  }
  e.getEnvironment().getRegion().put(deleteMarkers);
  // skip normal delete handling
  e.bypass();
}
 
Example 10
Source File: LocalIndexSplitter.java    From phoenix with Apache License 2.0
@Override
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
        byte[] splitKey, List<Mutation> metaEntries) throws IOException {
    RegionCoprocessorEnvironment environment = ctx.getEnvironment();
    HTableDescriptor tableDesc = ctx.getEnvironment().getRegion().getTableDesc();
    if (SchemaUtil.isSystemTable(tableDesc.getName())) {
        return;
    }
    RegionServerServices rss = ctx.getEnvironment().getRegionServerServices();
    if (tableDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) == null
            || !Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(tableDesc
                    .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
        TableName indexTable =
                TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
        if (!MetaTableAccessor.tableExists(rss.getConnection(), indexTable)) return;

        HRegion indexRegion = IndexUtil.getIndexRegion(environment);
        if (indexRegion == null) {
            LOG.warn("Index region corresponindg to data region " + environment.getRegion()
                    + " not in the same server. So skipping the split.");
            ctx.bypass();
            return;
        }
        try {
            int encodedVersion = VersionUtil.encodeVersion(environment.getHBaseVersion());
            if(encodedVersion >= SPLIT_TXN_MINIMUM_SUPPORTED_VERSION) {
                st = new SplitTransaction(indexRegion, splitKey);
                st.useZKForAssignment =
                        environment.getConfiguration().getBoolean("hbase.assignment.usezk",
                            true);
            } else {
                st = new IndexSplitTransaction(indexRegion, splitKey);
            }

            if (!st.prepare()) {
                LOG.error("Prepare for the table " + indexRegion.getTableDesc().getNameAsString()
                    + " failed. So returning null. ");
                ctx.bypass();
                return;
            }
            indexRegion.forceSplit(splitKey);
            daughterRegions = st.stepsBeforePONR(rss, rss, false);
            HRegionInfo copyOfParent = new HRegionInfo(indexRegion.getRegionInfo());
            copyOfParent.setOffline(true);
            copyOfParent.setSplit(true);
            // Put for parent
            Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
            MetaTableAccessor.addDaughtersToPut(putParent,
                    daughterRegions.getFirst().getRegionInfo(),
                    daughterRegions.getSecond().getRegionInfo());
            metaEntries.add(putParent);
            // Puts for daughters
            Put putA = MetaTableAccessor.makePutFromRegionInfo(
                    daughterRegions.getFirst().getRegionInfo());
            Put putB = MetaTableAccessor.makePutFromRegionInfo(
                    daughterRegions.getSecond().getRegionInfo());
            st.addLocation(putA, rss.getServerName(), 1);
            st.addLocation(putB, rss.getServerName(), 1);
            metaEntries.add(putA);
            metaEntries.add(putB);
        } catch (Exception e) {
            ctx.bypass();
            LOG.warn("index region splitting failed with the exception ", e);
            if (st != null){
                st.rollback(rss, rss);
                st = null;
                daughterRegions = null;
            }
        }
    }
}
 
Example 11
Source File: LocalIndexMerger.java    From phoenix with Apache License 2.0
@Override
public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
        HRegion regionA, HRegion regionB, List<Mutation> metaEntries) throws IOException {
    HTableDescriptor tableDesc = regionA.getTableDesc();
    if (SchemaUtil.isSystemTable(tableDesc.getName())) {
        return;
    }
    RegionServerServices rss = ctx.getEnvironment().getRegionServerServices();
    HRegionServer rs = (HRegionServer) rss;
    if (tableDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) == null
            || !Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(tableDesc
                    .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
        TableName indexTable =
                TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
        if (!MetaTableAccessor.tableExists(rs.getConnection(), indexTable)) return;
        HRegion indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
        if (indexRegionA == null) {
            LOG.warn("Index region corresponindg to data region " + regionA
                    + " not in the same server. So skipping the merge.");
            ctx.bypass();
            return;
        }
        HRegion indexRegionB = IndexUtil.getIndexRegion(regionB, ctx.getEnvironment());
        if (indexRegionB == null) {
            LOG.warn("Index region corresponindg to region " + regionB
                    + " not in the same server. So skipping the merge.");
            ctx.bypass();
            return;
        }
        try {
            rmt = new RegionMergeTransaction(indexRegionA, indexRegionB, false);
            if (!rmt.prepare(rss)) {
                LOG.error("Prepare for the index regions merge [" + indexRegionA + ","
                        + indexRegionB + "] failed. So returning null. ");
                ctx.bypass();
                return;
            }
            this.mergedRegion = rmt.stepsBeforePONR(rss, rss, false);
            rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(),
                indexRegionA.getRegionInfo(), indexRegionB.getRegionInfo(),
                rss.getServerName(), metaEntries, 1);
        } catch (Exception e) {
            ctx.bypass();
            LOG.warn("index regions merge failed with the exception ", e);
            if (rmt != null) {
                rmt.rollback(rss, rss);
                rmt = null;
                mergedRegion = null;
            }
        }
    }
}
 
Example 12
Source File: VisibilityController.java    From hbase with Apache License 2.0
@Override
public void prePrepareTimeStampForDeleteVersion(
    ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation delete, Cell cell,
    byte[] byteNow, Get get) throws IOException {
  // Nothing to do if we are not filtering by visibility
  if (!authorizationEnabled) {
    return;
  }

  CellVisibility cellVisibility = null;
  try {
    cellVisibility = delete.getCellVisibility();
  } catch (DeserializationException de) {
    throw new IOException("Invalid cell visibility specified " + delete, de);
  }
  // The check for checkForReservedVisibilityTagPresence happens in preBatchMutate.
  // It runs for every mutation, and that is enough.
  List<Tag> visibilityTags = new ArrayList<>();
  if (cellVisibility != null) {
    String labelsExp = cellVisibility.getExpression();
    try {
      visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, false,
          false);
    } catch (InvalidLabelException e) {
      throw new IOException("Invalid cell visibility specified " + labelsExp, e);
    }
  }
  get.setFilter(new DeleteVersionVisibilityExpressionFilter(visibilityTags,
      VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT));
  List<Cell> result = ctx.getEnvironment().getRegion().get(get, false);

  if (result.size() < get.getMaxVersions()) {
    // Nothing to delete
    PrivateCellUtil.updateLatestStamp(cell, byteNow);
    return;
  }
  if (result.size() > get.getMaxVersions()) {
    throw new RuntimeException("Unexpected size: " + result.size()
        + ". Results more than the max versions obtained.");
  }
  Cell getCell = result.get(get.getMaxVersions() - 1);
  PrivateCellUtil.setTimestamp(cell, getCell.getTimestamp());

  // We are bypassing here because HRegion.updateDeleteLatestVersionTimeStamp would otherwise
  // do another get and update the cell with the current timestamp. As the hook has already
  // determined the needed timestamp, we need to bypass here.
  // TODO : See if HRegion.updateDeleteLatestVersionTimeStamp() could be
  // called only if the hook is not called.
  ctx.bypass();
}