Java Code Examples for com.netflix.astyanax.MutationBatch#isEmpty()

The following examples show how to use com.netflix.astyanax.MutationBatch#isEmpty(). The source project and license are noted above each example.
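Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: queue rows into a MutationBatch and call execute() only when isEmpty() reports that mutations are actually pending. The class name, column family, and deleteRows helper below are hypothetical placeholders for illustration, not taken from any of the projects.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.StringSerializer;

public class MutationBatchIsEmptySketch {

    // Hypothetical column family; any existing ColumnFamily<K, C> works the same way.
    private static final ColumnFamily<String, String> CF_EXAMPLE =
            new ColumnFamily<>("example_cf", StringSerializer.get(), StringSerializer.get());

    /**
     * Deletes the given rows, but only talks to Cassandra when at least one
     * mutation was actually queued; an empty batch is simply dropped.
     */
    static void deleteRows(Keyspace keyspace, Iterable<String> rowKeys) throws ConnectionException {
        MutationBatch mutation = keyspace.prepareMutationBatch();
        for (String rowKey : rowKeys) {
            mutation.withRow(CF_EXAMPLE, rowKey).delete();
        }
        // isEmpty() is true when no rows were added, so this avoids a useless round trip.
        if (!mutation.isEmpty()) {
            mutation.execute();
        }
    }
}

The same guard appears in each of the examples below, where it prevents an execute() call on a batch that received no mutations.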
Example 1
Source File: AstyanaxDataWriterDAO.java    From emodb with Apache License 2.0
@Override
public void purge(AstyanaxStorage storage, Runnable progress) {
    DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
    CassandraKeyspace keyspace = placement.getKeyspace();

    // Scan all the shards and delete all the rows we find.
    MutationBatch mutation = keyspace.prepareMutationBatch(SorConsistencies.toAstyanax(WriteConsistency.STRONG));
    Iterator<String> keyIter = _keyScanner.scanKeys(storage, ReadConsistency.STRONG);
    while (keyIter.hasNext()) {
        ByteBuffer rowKey = storage.getRowKey(keyIter.next());
        mutation.withRow(placement.getBlockedDeltaColumnFamily(), rowKey).delete();
        if (mutation.getRowCount() >= 100) {
            progress.run();
            execute(mutation, "purge %d records from placement %s", mutation.getRowCount(), placement.getName());
            mutation.discardMutations();
        }
    }
    if (!mutation.isEmpty()) {
        progress.run();
        execute(mutation, "purge %d records from placement %s", mutation.getRowCount(), placement.getName());
    }
}
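In this example the batch is flushed whenever getRowCount() reaches 100, and discardMutations() resets the same MutationBatch instance for reuse; the final isEmpty() check writes the rows left over from the last partial chunk without issuing an empty mutation when the key iterator produced no rows or ended exactly on a chunk boundary.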
 
Example 2
Source File: AShardStateIO.java    From blueflood with Apache License 2.0
@Override
public void putShardState(int shard, Map<Granularity, Map<Integer, UpdateStamp>> slotTimes) throws IOException {
    AstyanaxIO astyanaxIO = AstyanaxIO.singleton();
    Timer.Context ctx = Instrumentation.getWriteTimerContext(CassandraModel.CF_METRICS_STATE_NAME);
    try {
        MutationBatch mutationBatch = astyanaxIO.getKeyspace().prepareMutationBatch();
        ColumnListMutation<SlotState> mutation = mutationBatch.withRow(CassandraModel.CF_METRICS_STATE, (long)shard);
        for (Map.Entry<Granularity, Map<Integer, UpdateStamp>> granEntry : slotTimes.entrySet()) {
            Granularity g = granEntry.getKey();
            for (Map.Entry<Integer, UpdateStamp> entry : granEntry.getValue().entrySet()) {
                // granularity,slot,state
                SlotState slotState = new SlotState(g, entry.getKey(), entry.getValue().getState());
                mutation.putColumn(slotState, entry.getValue().getTimestamp());
                /*
                  Note: this method used to set the Cassandra column timestamp to entry.getValue().getTimestamp() * 1000, i.e. the collection time.
                  That implementation was changed because it could prevent delayed metrics from being rolled up.
                  Consider two out-of-order metrics M1 and M2 belonging to the same slot, with collection times T1 and T2 where T2 > T1.
                  Assume M2 arrives first: the slot is marked active, rolled up, and its state set to Rolled. Now M1 arrives: we update the slot state to active and
                  set the slot timestamp to T1, but because the column timestamp would then be T1 * 1000 and T1 < T2, Cassandra would silently ignore the update.
                 */
            }
        }
        if (!mutationBatch.isEmpty()) {
            try {
                mutationBatch.execute();
            } catch (ConnectionException e) {
                Instrumentation.markWriteError(e);
                LOG.error("Error persisting shard state", e);
                throw new IOException(e);
            }
        }
    } finally {
        ctx.stop();
    }
}
 
Example 3
Source File: AstyanaxStorageProvider.java    From emodb with Apache License 2.0
private void purge(AstyanaxStorage storage, boolean deleteMetadata, boolean deleteData, Runnable progress) {
    BlobPlacement placement = (BlobPlacement) storage.getPlacement();
    CassandraKeyspace keyspace = placement.getKeyspace();
    ColumnFamily<ByteBuffer, Composite> cf = placement.getBlobColumnFamily();

    // Limit the query to a single column since we mainly just want the row keys (but not zero columns because
    // then we couldn't distinguish a live row from a row that has been deleted already).
    ByteBufferRange columnRange = new RangeBuilder().setLimit(1).build();

    MutationBatch mutation = keyspace.prepareMutationBatch(CONSISTENCY_STRONG);

    LimitCounter unlimited = LimitCounter.max();

    // Range query all the shards and delete all the rows we find.
    Iterator<ByteBufferRange> scanIter = storage.scanIterator(null);
    while (scanIter.hasNext()) {
        ByteBufferRange keyRange = scanIter.next();
        Iterator<Row<ByteBuffer, Composite>> rowIter = scanInternal(placement, keyRange, columnRange, unlimited);
        while (rowIter.hasNext()) {
            Row<ByteBuffer, Composite> row = rowIter.next();
            if (row.getColumns().isEmpty()) {
                continue;  // don't bother deleting range ghosts
            }

            if (deleteMetadata && deleteData) {
                mutation.withRow(cf, row.getKey()).delete();
            } else {
                if (deleteMetadata) {
                    mutation.withRow(cf, row.getKey())
                            .deleteColumn(getColumn(ColumnGroup.A, 0));
                }

                if (deleteData) {
                    mutation.withRow(cf, row.getKey())
                            .deleteColumn(getColumn(ColumnGroup.B, 1))
                            .deleteColumn(getColumn(ColumnGroup.Z, 2));
                }
            }

            if (mutation.getRowCount() >= 100) {
                progress.run();
                execute(mutation);
                mutation.discardMutations();
            }
        }
    }
    if (!mutation.isEmpty()) {
        progress.run();
        execute(mutation);
    }
}
 
Example 4
Source File: AstyanaxDataWriterDAO.java    From emodb with Apache License 2.0
private void write(BatchKey batchKey, List<BatchUpdate> updates, UpdateListener listener) {
    // Invoke the configured listener.  This is used to write events to the databus.
    listener.beforeWrite(Collections2.transform(updates, BatchUpdate::getUpdate));

    DeltaPlacement placement = batchKey.getPlacement();
    MutationBatch mutation = placement.getKeyspace().prepareMutationBatch(batchKey.getConsistency());
    int approxMutationSize = 0;
    int updateCount = 0;

    for (BatchUpdate batchUpdate : updates) {
        AstyanaxStorage storage = batchUpdate.getStorage();
        RecordUpdate update = batchUpdate.getUpdate();
        ByteBuffer rowKey = storage.getRowKey(update.getKey());

        Delta delta = update.getDelta();
        String deltaString = delta.toString();
        Set<String> tags = update.getTags();

        // Set any change flags which may make reading this delta back more efficient.  Currently the only
        // such case is a literal map delta.
        EnumSet<ChangeFlag> changeFlags = EnumSet.noneOf(ChangeFlag.class);
        if (delta.isConstant()) {
            changeFlags.add(ChangeFlag.CONSTANT_DELTA);
        }
        if (delta instanceof MapDelta || (delta instanceof Literal && ((Literal) delta).getValue() instanceof Map)) {
            changeFlags.add(ChangeFlag.MAP_DELTA);
        }

        // Regardless of migration stage, we will still encode both delta versions

        // The values are encoded in a flexible format that allows versioning of the strings
        ByteBuffer encodedBlockDelta = stringToByteBuffer(_changeEncoder.encodeDelta(deltaString, changeFlags, tags, new StringBuilder(_deltaPrefix)).toString());
        ByteBuffer encodedDelta = encodedBlockDelta.duplicate();
        encodedDelta.position(encodedDelta.position() + _deltaPrefixLength);

        int blockDeltaSize = encodedBlockDelta.remaining();

        UUID changeId = update.getChangeId();

        // Validate sizes of individual deltas
        if (blockDeltaSize > MAX_DELTA_SIZE) {
            _oversizeUpdateMeter.mark();
            throw new DeltaSizeLimitException("Delta exceeds size limit of " + MAX_DELTA_SIZE + ": " + blockDeltaSize, blockDeltaSize);
        }

        // Perform a quick validation that the size of the mutation batch as a whole won't exceed the thrift threshold.
        // This validation is inexact and overly conservative, but it is cheap and fast.
        if (!mutation.isEmpty() && approxMutationSize + blockDeltaSize > MAX_DELTA_SIZE) {
            // Adding the next row may exceed the Thrift threshold.  Check definitively now.  This is fairly expensive,
            // which is why we don't do it unless the cheap check above passes.
            MutationBatch potentiallyOversizeMutation = placement.getKeyspace().prepareMutationBatch(batchKey.getConsistency());
            potentiallyOversizeMutation.mergeShallow(mutation);

            putBlockedDeltaColumn(potentiallyOversizeMutation.withRow(placement.getBlockedDeltaColumnFamily(), rowKey), changeId, encodedBlockDelta);

            if (getMutationBatchSize(potentiallyOversizeMutation) >= MAX_THRIFT_FRAMED_TRANSPORT_SIZE) {
                // Execute the mutation batch now.  As a side-effect this empties the mutation batch
                // so we can continue using the same instance.
                execute(mutation, "batch update %d records in placement %s", updateCount, placement.getName());
                approxMutationSize = 0;
                updateCount = 0;
            }
        }

        putBlockedDeltaColumn(mutation.withRow(placement.getBlockedDeltaColumnFamily(), rowKey), changeId, encodedBlockDelta);
        approxMutationSize += blockDeltaSize;

        updateCount += 1;
    }

    execute(mutation, "batch update %d records in placement %s", updateCount, placement.getName());

    // Invoke the configured listener.  This is used to write audits.
    listener.afterWrite(Collections2.transform(updates, BatchUpdate::getUpdate));

    _updateMeter.mark(updates.size());
}
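In this last example isEmpty() plays a slightly different role: the expensive definitive size check (mergeShallow() into a scratch batch followed by getMutationBatchSize()) only runs when the batch already holds at least one row and the cheap approximate size suggests the Thrift frame limit might be reached, since an empty batch can always accept the next delta.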