Java Code Examples for me.prettyprint.hector.api.mutation.Mutator#addDeletion()

The following examples show how to use me.prettyprint.hector.api.mutation.Mutator#addDeletion(). Each example lists the source file and the open-source project it was taken from, so you can refer back to the original code for the full context.
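For orientation, here is a minimal, self-contained sketch of the common call pattern before the project examples: create a Mutator with HFactory.createMutator(), queue one or more deletions with addDeletion(), and send them as a single batch with execute(). The keyspace handle, the "Users" column family, and the row/column names below are illustrative placeholders, not taken from any of the projects.

import me.prettyprint.cassandra.serializers.StringSerializer;
import me.prettyprint.hector.api.Keyspace;
import me.prettyprint.hector.api.factory.HFactory;
import me.prettyprint.hector.api.mutation.Mutator;

public class AddDeletionSketch {

    // Minimal sketch: delete a single column, then an entire row, in one batch.
    // "Users" and the keys/column names are placeholders for illustration only.
    static void deleteExamples(Keyspace keyspace) {
        StringSerializer se = StringSerializer.get();

        Mutator<String> mutator = HFactory.createMutator(keyspace, se);

        // Delete one column of a row: key, column family, column name, name serializer.
        mutator.addDeletion("user-123", "Users", "email", se);

        // Passing a null column name deletes the whole row (as in Example 5 below).
        mutator.addDeletion("user-456", "Users", null, se);

        // Deletions are buffered until execute() sends the batch mutation.
        mutator.execute();
    }
}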
Example 1
Source File: Cassandra12xMapDAO.java    From cumulusrdf with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void delete(final K... keys) {

	if (keys == null || keys.length == 0) {
		return;
	}

	final Mutator<K> m = createMutator(_keyspace, _serializer_k);

	for (final K key : keys) {
		m.addDeletion(key, _cf_name, COLUMN_NAME, BYTE_SERIALIZER);
	}

	m.execute();
}
 
Example 2
Source File: SecondaryIndexDeletionBuffer.java    From cumulusrdf with Apache License 2.0
/**
 * Flushes the buffer.
 * 
 * @param mutator The mutator to fill with deletions.
 * @param columnFamilyName The column family (name) to delete from.
 * @param column The column to delete from.
 * @param serializer The serializer to use for the column.
 * @param dao the rdf index data access object.
 * @param <T> The type of the column key.
 * @return True if the buffer was flushed and at least one element was checked for deletion, false otherwise.
 * @throws DataAccessLayerException in case of data access failure.
 */
<T> boolean flush(
		final Mutator<byte[]> mutator, 
		final String columnFamilyName, 
		final T column, 
		final Serializer<T> serializer,
		final TripleIndexDAO dao) throws DataAccessLayerException {
	if (_candidates.size() == 0) {
		return false;
	}
	
	for (SecondaryIndexDeletionCandidate candidate : _candidates) {
		if (!dao.query(candidate.getQuery(), 1).hasNext()) {
			mutator.addDeletion(candidate.getRow(), columnFamilyName, column, serializer);
		}
	}
	
	return true;
}
 
Example 3
Source File: CassandraMQUtils.java    From usergrid with Apache License 2.0
public static Mutator<ByteBuffer> addMessageToMutator( Mutator<ByteBuffer> m, Message message, long timestamp ) {

    Map<ByteBuffer, ByteBuffer> columns = serializeMessage( message );

    if ( columns == null ) {
        return m;
    }

    for ( Map.Entry<ByteBuffer, ByteBuffer> column_entry : columns.entrySet() ) {
        if ( ( column_entry.getValue() != null ) && column_entry.getValue().hasRemaining() ) {
            HColumn<ByteBuffer, ByteBuffer> column =
                    createColumn( column_entry.getKey(), column_entry.getValue(), timestamp, be, be );
            m.addInsertion( bytebuffer( message.getUuid() ), QueuesCF.MESSAGE_PROPERTIES.toString(), column );
        }
        else {
            m.addDeletion( bytebuffer( message.getUuid() ), QueuesCF.MESSAGE_PROPERTIES.toString(),
                    column_entry.getKey(), be, timestamp );
        }
    }

    return m;
}
 
Example 4
Source File: CassandraMQUtils.java    From usergrid with Apache License 2.0
public static Mutator<ByteBuffer> addQueueToMutator( Mutator<ByteBuffer> m, Queue queue, long timestamp ) {

    Map<ByteBuffer, ByteBuffer> columns = serializeQueue( queue );

    if ( columns == null ) {
        return m;
    }

    for ( Map.Entry<ByteBuffer, ByteBuffer> column_entry : columns.entrySet() ) {
        if ( ( column_entry.getValue() != null ) && column_entry.getValue().hasRemaining() ) {
            HColumn<ByteBuffer, ByteBuffer> column =
                    createColumn( column_entry.getKey(), column_entry.getValue(), timestamp, be, be );
            m.addInsertion( bytebuffer( queue.getUuid() ), QueuesCF.QUEUE_PROPERTIES.toString(), column );
        }
        else {
            m.addDeletion( bytebuffer( queue.getUuid() ), QueuesCF.QUEUE_PROPERTIES.toString(),
                    column_entry.getKey(), be, timestamp );
        }
    }

    return m;
}
 
Example 5
Source File: CassandraUserStoreManager.java    From carbon-identity with Apache License 2.0
/**
 * Deletes a user by userName.
 */
@Override
public void doDeleteUser(String userName) throws UserStoreException {

    Mutator<Composite> mutator = HFactory.createMutator(keyspace, CompositeSerializer.get());
    String[] roles = doGetExternalRoleListOfUser(userName, "");
    for (String role : roles) {
        Composite key = new Composite();
        key.addComponent(role, stringSerializer);
        key.addComponent(tenantIdString, stringSerializer);
        ColumnFamilyTemplate<Composite, String> userCFTemplate = new ThriftColumnFamilyTemplate<Composite, String>(
                keyspace, CFConstants.UM_ROLE_USER_INDEX, CompositeSerializer.get(), StringSerializer.get());
        try {
            userCFTemplate.deleteColumn(key, userName);
        } catch (HectorException e) {
            log.error("Error during deletion ", e);
        }
    }

    Composite userKey = new Composite();
    userKey.addComponent(userName, stringSerializer);
    userKey.addComponent(tenantIdString, stringSerializer);
    mutator.addDeletion(userKey, CFConstants.UM_USER_ROLE, null, CompositeSerializer.get());
    mutator.addDeletion(userKey, CFConstants.UM_USER, null, CompositeSerializer.get());
    mutator.execute();

    if (log.isDebugEnabled()) {
        log.debug("Deleted user " + userName + " successfully");
    }
}
 
Example 6
Source File: QueueManagerImpl.java    From usergrid with Apache License 2.0
public void batchUnsubscribeFromQueue( Mutator<ByteBuffer> batch, String publisherQueuePath, UUID publisherQueueId,
                                       String subscriberQueuePath, UUID subscriberQueueId, long timestamp ) {

    batch.addDeletion( bytebuffer( publisherQueueId ), QUEUE_SUBSCRIBERS.getColumnFamily(), subscriberQueuePath, se,
            timestamp );

    batch.addDeletion( bytebuffer( subscriberQueueId ), QUEUE_SUBSCRIPTIONS.getColumnFamily(), publisherQueuePath,
            se, timestamp );
}
 
Example 7
Source File: ConsumerTransaction.java    From usergrid with Apache License 2.0
/** Delete the specified transaction */
private void deleteTransaction( UUID queueId, UUID consumerId, UUID transactionId )
{

    Mutator<ByteBuffer> mutator = CountingMutator.createFlushingMutator( ko, be );
    ByteBuffer key = getQueueClientTransactionKey( queueId, consumerId );

    mutator.addDeletion( key, CONSUMER_QUEUE_TIMEOUTS.getColumnFamily(), transactionId, ue,
            cass.createTimestamp() );

    mutator.execute();
}
 
Example 8
Source File: ConsumerTransaction.java    From usergrid with Apache License 2.0
/**
 * Delete all re-read transaction pointers
 *
 * @param pointers The list of transaction pointers
 * @param maxIndex The index to stop at (exclusive)
 * @param queueId The queue id
 * @param consumerId The consumer id
 */
protected void deleteTransactionPointers( List<TransactionPointer> pointers, int maxIndex, UUID queueId,
                                          UUID consumerId )
{

    if ( maxIndex == 0 || pointers.size() == 0 )
    {
        return;
    }

    Mutator<ByteBuffer> mutator = CountingMutator.createFlushingMutator( ko, be );
    ByteBuffer key = getQueueClientTransactionKey( queueId, consumerId );

    for ( int i = 0; i < maxIndex && i < pointers.size(); i++ )
    {
        UUID pointer = pointers.get( i ).expiration;

        if ( logger.isTraceEnabled() )
        {
            logger.trace( "Removing transaction pointer '{}' for queue '{}' and consumer '{}'",
                    pointer, queueId, consumerId
            );
        }

        mutator.addDeletion( key, CONSUMER_QUEUE_TIMEOUTS.getColumnFamily(), pointer, ue, cass.createTimestamp() );
    }

    mutator.execute();
}
 
Example 9
Source File: CassandraPersistenceUtils.java    From usergrid with Apache License 2.0
public static void addDeleteToMutator( Mutator<ByteBuffer> m, Object columnFamily, Object key, Object columnName,
                                       long timestamp ) throws Exception {

    logBatchOperation( "Delete", columnFamily, key, columnName, null, timestamp );

    if ( columnName instanceof List<?> ) {
        columnName = DynamicComposite.toByteBuffer( ( List<?> ) columnName );
    }

    m.addDeletion( bytebuffer( key ), columnFamily.toString(), bytebuffer( columnName ), be, timestamp );
}
 
Example 10
Source File: PerfDataAccessor.java    From oneops with Apache License 2.0
public void purgeMetrics(long time, String bucket) {

        int rowCount = 0;
        int totalColCount = 0;
        int totalColDeleted = 0;

        Long start = 1L;
        Long end = time;

        // safeguard not to delete anything in past week
        long now = System.currentTimeMillis() / 1000;
        logger.info("       now: " + now);
        logger.info("startEpoch: " + time);

        if (time + (60 * 60 * 24 * 7) > now) {
            logger.error("input time too soon - cannot be within past week");
            return;
        }

        int maxColumns = (int) (end - start);
        int pageSize = 1000;
        String lastKey = null;

        Mutator<byte[]> mutator = HFactory.createMutator(keyspace, bytesSerializer);

        RangeSlicesQuery<String, Long, Double> query = HFactory
                .createRangeSlicesQuery(keyspace, stringSerializer, longSerializer, doubleSerializer)
                .setColumnFamily(DATA_CF).setReturnKeysOnly()
                .setRowCount(pageSize);

        while (true) {
            query.setKeys(lastKey, null);

            QueryResult<OrderedRows<String, Long, Double>> result = query.execute();
            OrderedRows<String, Long, Double> orderedRows = result.get();
            Iterator<Row<String, Long, Double>> rowsIterator = orderedRows.iterator();

            // we'll skip this first one, since it is the same as the last one
            // from previous time we executed
            if (lastKey != null && rowsIterator != null)
                rowsIterator.next();

            while (rowsIterator.hasNext()) {
                Row<String, Long, Double> row = rowsIterator.next();

                if (!row.getKey().endsWith("-" + bucket)) {
                    continue;
                }

                rowCount++;
                lastKey = row.getKey();

                List<byte[]> keys = new ArrayList<byte[]>();
                keys.add(row.getKey().getBytes());

                MultigetSliceQuery<byte[], Long, Double> multigetSliceQuery = HFactory
                        .createMultigetSliceQuery(keyspace, bytesSerializer, longSerializer, doubleSerializer)
                        .setColumnFamily(DATA_CF).setKeys(keys)
                        .setRange(start, end, false, maxColumns);

                QueryResult<Rows<byte[], Long, Double>> colResult = multigetSliceQuery.execute();
                Rows<byte[], Long, Double> rows = colResult.get();

                int sampleCount = 0;
                int deletedCount = 0;
                for (Row<byte[], Long, Double> rowResult : rows) {

                    List<HColumn<Long, Double>> cols = rowResult.getColumnSlice().getColumns();
                    Iterator<HColumn<Long, Double>> listIter = cols.listIterator();

                    while (listIter.hasNext()) {
                        HColumn<Long, Double> c = (HColumn<Long, Double>) listIter.next();

                        if (c.getName() < time) {
                            mutator.addDeletion(row.getKey().getBytes(), DATA_CF, c.getName(), longSerializer);
                            deletedCount++;
                        }
                        sampleCount++;
                    }

                    totalColDeleted += deletedCount;
                    totalColCount += sampleCount;

                    mutator.execute();
                }

                logger.info(row.getKey() + ": " + sampleCount + " deleted: " + deletedCount);
                if (rows.getCount() < pageSize)
                    break;

            }
            logger.info("rows: " + rowCount + " cols: " + totalColCount
                    + " deleted: " + totalColDeleted);

            if (orderedRows.getCount() < pageSize)
                break;

        }

    }
 
Example 11
Source File: QueueManagerImpl.java    From usergrid with Apache License 2.0
public QueueIndexUpdate batchUpdateQueueIndex( QueueIndexUpdate indexUpdate, UUID subcriptionQueueId )
        throws Exception {

    if (logger.isTraceEnabled()) {
        logger.trace("batchUpdateQueueIndex");
    }

    Mutator<ByteBuffer> batch = indexUpdate.getBatch();

    // queue_id,prop_name
    Object index_key = key( subcriptionQueueId, indexUpdate.getEntryName() );

    // subscription_queue_id,subscriber_queue_id,prop_name

    for ( QueueIndexEntry entry : indexUpdate.getPrevEntries() ) {

        if ( entry.getValue() != null ) {

            index_key = key( subcriptionQueueId, entry.getPath() );

            batch.addDeletion( bytebuffer( index_key ), PROPERTY_INDEX.getColumnFamily(), entry.getIndexComposite(),
                    dce, indexUpdate.getTimestamp() );
        }
        else {
            logger.error( "Unexpected condition - deserialized property value is null" );
        }
    }

    if ( indexUpdate.getNewEntries().size() > 0 ) {

        for ( QueueIndexEntry indexEntry : indexUpdate.getNewEntries() ) {

            index_key = key( subcriptionQueueId, indexEntry.getPath() );

            batch.addInsertion( bytebuffer( index_key ), PROPERTY_INDEX.getColumnFamily(),
                    createColumn( indexEntry.getIndexComposite(), ByteBuffer.allocate( 0 ),
                            indexUpdate.getTimestamp(), dce, be ) );
        }
    }

    for ( String index : indexUpdate.getIndexesSet() ) {
        batch.addInsertion( bytebuffer( key( subcriptionQueueId, DICTIONARY_SUBSCRIBER_INDEXES ) ),
                QUEUE_DICTIONARIES.getColumnFamily(),
                createColumn( index, ByteBuffer.allocate( 0 ), indexUpdate.getTimestamp(), se, be ) );
    }

    return indexUpdate;
}