Java Code Examples for com.netflix.astyanax.MutationBatch#mergeShallow()

The following examples show how to use com.netflix.astyanax.MutationBatch#mergeShallow(). They are drawn from open-source projects; the source file and project are listed above each example.
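Before the examples, a minimal sketch of the method itself may help: mergeShallow(MutationBatch) merges the row mutations of another batch into the receiver (a shallow merge, so row structures are shared rather than deep-copied), letting several independently prepared batches be flushed in a single call. The keyspace wiring and the Users column family below are hypothetical stand-ins, not taken from the projects above.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.StringSerializer;

public class MergeShallowSketch {

    // Hypothetical column family, used only for illustration.
    private static final ColumnFamily<String, String> USERS =
            new ColumnFamily<>( "Users", StringSerializer.get(), StringSerializer.get() );

    public static void write( final Keyspace keyspace ) throws ConnectionException {
        // Two batches prepared independently, e.g. by different components.
        final MutationBatch first = keyspace.prepareMutationBatch();
        first.withRow( USERS, "user-1" ).putColumn( "name", "alice" );

        final MutationBatch second = keyspace.prepareMutationBatch();
        second.withRow( USERS, "user-2" ).putColumn( "name", "bob" );

        // Pull the rows of `second` into `first`, then flush both in one round trip.
        first.mergeShallow( second );
        first.execute();
    }
}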
Example 1
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch removeIdTypeToTarget( final ApplicationScope scope, final Id targetNode, final String type,
                                           final String idType, final long timestamp ) {
    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.removeIdTypeToTarget( scope, targetNode, type, idType, timestamp ) );
        aggregateBatch.mergeShallow( migration.to.removeIdTypeToTarget( scope, targetNode, type, idType, timestamp ) );

        return aggregateBatch;
    }

    return migration.to.removeIdTypeToTarget( scope, targetNode, type, idType, timestamp );
}
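This and the next several examples share one dual-write shape: while a data migration is in flight, the same logical mutation is built against both the old (from) and new (to) serialization strategies and merged into a single batch, so one execute() keeps both column families consistent. Stripped of the Usergrid specifics, the pattern looks roughly like the sketch below; the WriteStrategy interface is a hypothetical stand-in for the real serialization interfaces.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;

public final class DualWriteProxy {

    // Hypothetical stand-in for the serialization strategy interfaces above.
    interface WriteStrategy {
        MutationBatch write( String key, String value );
    }

    private final Keyspace keyspace;
    private final WriteStrategy from;   // old implementation
    private final WriteStrategy to;     // new implementation, the eventual target
    private final boolean needsMigration;

    DualWriteProxy( final Keyspace keyspace, final WriteStrategy from,
                    final WriteStrategy to, final boolean needsMigration ) {
        this.keyspace = keyspace;
        this.from = from;
        this.to = to;
        this.needsMigration = needsMigration;
    }

    MutationBatch write( final String key, final String value ) {
        if ( needsMigration ) {
            // Mid-migration: build the mutation against both strategies and
            // merge them so a single execute() updates both column families.
            final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
            aggregateBatch.mergeShallow( from.write( key, value ) );
            aggregateBatch.mergeShallow( to.write( key, value ) );
            return aggregateBatch;
        }
        // Migration finished: only the new strategy is written.
        return to.write( key, value );
    }
}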
 
Example 2
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch removeEdgeTypeToTarget( final ApplicationScope scope, final Id targetNode, final String type,
                                             final long timestamp ) {

    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.removeEdgeTypeToTarget( scope, targetNode, type, timestamp ) );
        aggregateBatch.mergeShallow( migration.to.removeEdgeTypeToTarget( scope, targetNode, type, timestamp ) );

        return aggregateBatch;
    }

    return migration.to.removeEdgeTypeToTarget( scope, targetNode, type, timestamp );
}
 
Example 3
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch removeIdTypeFromSource( final ApplicationScope scope, final Id sourceNode, final String type,
                                             final String idType, final long timestamp ) {

    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch
                .mergeShallow( migration.from.removeIdTypeFromSource( scope, sourceNode, type, idType, timestamp ) );
        aggregateBatch.mergeShallow( migration.to.removeIdTypeFromSource( scope, sourceNode, type, idType, timestamp ) );

        return aggregateBatch;
    }

    return migration.to.removeIdTypeFromSource( scope, sourceNode, type, idType, timestamp );
}
 
Example 4
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch removeEdgeTypeFromSource( final ApplicationScope scope, final Id sourceNode, final String type,
                                               final long timestamp ) {
    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.removeEdgeTypeFromSource( scope, sourceNode, type, timestamp ) );
        aggregateBatch.mergeShallow( migration.to.removeEdgeTypeFromSource( scope, sourceNode, type, timestamp ) );

        return aggregateBatch;
    }

    return migration.to.removeEdgeTypeFromSource( scope, sourceNode, type, timestamp );
}
 
Example 5
Source File: MvccLogEntrySerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch delete( final ApplicationScope applicationScope, final Id entityId, final UUID version ) {
    final MigrationRelationship<MvccLogEntrySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.delete( applicationScope, entityId, version ) );
        aggregateBatch.mergeShallow( migration.to.delete( applicationScope, entityId, version ) );

        return aggregateBatch;
    }

    return migration.to.delete( applicationScope, entityId, version );
}
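None of these proxy methods execute the merged batch themselves; the caller flushes it, typically translating Astyanax's checked ConnectionException (Example 14 below shows a real call site in WriteCommit). A minimal, hypothetical helper for that step:

import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;

public final class BatchExecutor {

    // Flushes a merged batch, translating the checked Astyanax exception.
    // This mirrors the execute/catch pattern in the WriteCommit example below.
    public static void flush( final MutationBatch batch ) {
        try {
            batch.execute();
        }
        catch ( ConnectionException e ) {
            throw new RuntimeException( "Unable to execute mutation batch", e );
        }
    }
}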
 
Example 6
Source File: ShardedEdgeSerializationImpl.java    From usergrid with Apache License 2.0
/**
 * Create a mutation batch
 */
public MutationBatch createBatch( final ApplicationScope scope, final Collection<Shard> shards,
                                  final UUID opTimestamp ) {

    final MutationBatch batch =
            keyspace.prepareMutationBatch().withConsistencyLevel( cassandraConfig.getWriteCL() )
                    .withTimestamp( opTimestamp.timestamp() );


    final C column = getDirectedEdge();
    final MultiTenantColumnFamily<ScopedRowKey<R>, C> columnFamily = getColumnFamily();
    final boolean isDeleted = isDeleted();


    for ( Shard shard : shards ) {
        final R rowKey = getRowKey( shard );
        writeEdge( batch, columnFamily, scope, rowKey, column, shard, isDeleted );

        if(logger.isTraceEnabled() && getDirectedEdge() instanceof DirectedEdge){
            DirectedEdge directedEdge = (DirectedEdge) getDirectedEdge();
            if( shard != null && shard.getShardEnd().isPresent()
                && directedEdge.timestamp > shard.getShardEnd().get().timestamp){

                logger.trace("Writing edge past shard end for edge: {}, shard: {}", directedEdge, shard );

            }
        }

        // If an edge is being written to this shard, un-delete the shard in case it was
        // previously marked deleted; don't un-delete when this write actually removes an edge.
        // (Usergrid allows entities to be written with a UUID generated from a past timestamp.)
        if(shard.isDeleted() && !isDeleted) {
            logger.info("Shard is deleted. Un-deleting as new data is being written to the shard - {}", shard);
            shard.setDeleted(false);
            batch.mergeShallow(edgeShardSerialization.writeShardMeta(scope, shard, getDirectedEdgeMeta()));
        }

    }


    return batch;
}
 
Example 7
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch removeIdTypeToTarget( final ApplicationScope scope, final Edge edge ) {

    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.removeIdTypeToTarget( scope, edge ) );
        aggregateBatch.mergeShallow( migration.to.removeIdTypeToTarget( scope, edge ) );

        return aggregateBatch;
    }

    return migration.to.removeIdTypeToTarget( scope, edge );
}
 
Example 8
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch removeEdgeTypeToTarget( final ApplicationScope scope, final Edge edge ) {

    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.removeEdgeTypeToTarget( scope, edge ) );
        aggregateBatch.mergeShallow( migration.to.removeEdgeTypeToTarget( scope, edge ) );

        return aggregateBatch;
    }

    return migration.to.removeEdgeTypeToTarget( scope, edge );
}
 
Example 9
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch removeIdTypeFromSource( final ApplicationScope scope, final Edge edge ) {

    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.removeIdTypeFromSource( scope, edge ) );
        aggregateBatch.mergeShallow( migration.to.removeIdTypeFromSource( scope, edge ) );

        return aggregateBatch;
    }

    return migration.to.removeIdTypeFromSource( scope, edge );
}
 
Example 10
Source File: EdgeMetadataSerializationProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch writeEdge( final ApplicationScope scope, final Edge edge ) {

    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();


    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.writeEdge( scope, edge ) );
        aggregateBatch.mergeShallow( migration.to.writeEdge( scope, edge ) );

        return aggregateBatch;
    }

    return migration.to.writeEdge( scope, edge );
}
 
Example 11
Source File: AstyanaxDataWriterDAO.java    From emodb with Apache License 2.0
private void write(BatchKey batchKey, List<BatchUpdate> updates, UpdateListener listener) {
    // Invoke the configured listener.  This is used to write events to the databus.
    listener.beforeWrite(Collections2.transform(updates, BatchUpdate::getUpdate));

    DeltaPlacement placement = batchKey.getPlacement();
    MutationBatch mutation = placement.getKeyspace().prepareMutationBatch(batchKey.getConsistency());
    int approxMutationSize = 0;
    int updateCount = 0;

    for (BatchUpdate batchUpdate : updates) {
        AstyanaxStorage storage = batchUpdate.getStorage();
        RecordUpdate update = batchUpdate.getUpdate();
        ByteBuffer rowKey = storage.getRowKey(update.getKey());

        Delta delta = update.getDelta();
        String deltaString = delta.toString();
        Set<String> tags = update.getTags();

        // Set any change flags which may make reading this delta back more efficient.  Currently the only case
        // for this is for a literal map delta.
        EnumSet<ChangeFlag> changeFlags = EnumSet.noneOf(ChangeFlag.class);
        if (delta.isConstant()) {
            changeFlags.add(ChangeFlag.CONSTANT_DELTA);
        }
        if (delta instanceof MapDelta || (delta instanceof Literal && ((Literal) delta).getValue() instanceof Map)) {
            changeFlags.add(ChangeFlag.MAP_DELTA);
        }

        // Regardless of migration stage, we still encode both delta versions

        // The values are encoded in a flexible format that allows versioning of the strings
        ByteBuffer encodedBlockDelta = stringToByteBuffer(_changeEncoder.encodeDelta(deltaString, changeFlags, tags, new StringBuilder(_deltaPrefix)).toString());
        ByteBuffer encodedDelta = encodedBlockDelta.duplicate();
        encodedDelta.position(encodedDelta.position() + _deltaPrefixLength);

        int blockDeltaSize = encodedBlockDelta.remaining();

        UUID changeId = update.getChangeId();

        // Validate sizes of individual deltas
        if (blockDeltaSize > MAX_DELTA_SIZE) {
            _oversizeUpdateMeter.mark();
            throw new DeltaSizeLimitException("Delta exceeds size limit of " + MAX_DELTA_SIZE + ": " + blockDeltaSize, blockDeltaSize);
        }

        // Perform a quick validation that the size of the mutation batch as a whole won't exceed the thrift threshold.
        // This validation is inexact and overly-conservative but it is cheap and fast.
        if (!mutation.isEmpty() && approxMutationSize + blockDeltaSize > MAX_DELTA_SIZE) {
            // Adding the next row may exceed the Thrift threshold.  Check definitively now.  This is fairly expensive,
            // which is why we don't do it unless the cheap check above passes.
            MutationBatch potentiallyOversizeMutation = placement.getKeyspace().prepareMutationBatch(batchKey.getConsistency());
            potentiallyOversizeMutation.mergeShallow(mutation);

            putBlockedDeltaColumn(potentiallyOversizeMutation.withRow(placement.getBlockedDeltaColumnFamily(), rowKey), changeId, encodedBlockDelta);

            if (getMutationBatchSize(potentiallyOversizeMutation) >= MAX_THRIFT_FRAMED_TRANSPORT_SIZE) {
                // Execute the mutation batch now.  As a side-effect this empties the mutation batch
                // so we can continue using the same instance.
                execute(mutation, "batch update %d records in placement %s", updateCount, placement.getName());
                approxMutationSize = 0;
                updateCount = 0;
            }
        }

        putBlockedDeltaColumn(mutation.withRow(placement.getBlockedDeltaColumnFamily(), rowKey), changeId, encodedBlockDelta);
        approxMutationSize += blockDeltaSize;

        updateCount += 1;
    }

    execute(mutation, "batch update %d records in placement %s", updateCount, placement.getName());

    // Invoke the configured listener.  This is used to write audits.
    listener.afterWrite(Collections2.transform(updates, BatchUpdate::getUpdate));

    _updateMeter.mark(updates.size());
}
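The scratch-batch check above exploits the fact that mergeShallow() reads rows out of its argument without draining it: the in-progress mutation can be copied into a throwaway batch, extended by the candidate row, and measured, with nothing committed. A condensed, hypothetical sketch of that guard follows; for simplicity it bounds the row count via getRowCount() rather than the serialized byte size that emodb's own getMutationBatchSize() helper measures.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.StringSerializer;

public final class BatchSizeGuard {

    // Hypothetical column family and limit, for illustration only.
    private static final ColumnFamily<String, String> CF =
            new ColumnFamily<>( "Deltas", StringSerializer.get(), StringSerializer.get() );
    private static final int MAX_ROWS = 1000;

    /**
     * Returns true when appending the candidate column would push the batch
     * past the limit. The scratch batch exists only for the measurement and is
     * then discarded. Because the merge is shallow, its rows are shared with
     * the original batch, so the scratch copy should be measured, never executed.
     */
    static boolean wouldOverflow( final Keyspace keyspace, final MutationBatch mutation,
                                  final String rowKey, final String column, final String value ) {
        final MutationBatch scratch = keyspace.prepareMutationBatch();
        scratch.mergeShallow( mutation );                        // non-destructive copy
        scratch.withRow( CF, rowKey ).putColumn( column, value );
        return scratch.getRowCount() >= MAX_ROWS;
    }
}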
 
Example 12
Source File: EdgeSerializationImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch deleteEdge( final ApplicationScope scope, final MarkedEdge markedEdge, final UUID timestamp ) {
    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateEdge( markedEdge );
    ValidationUtils.verifyTimeUuid( timestamp, "timestamp" );

    final long now = timeService.getCurrentTime();
    final Id sourceNode = markedEdge.getSourceNode();
    final Id targetNode = markedEdge.getTargetNode();
    final String edgeType = markedEdge.getType();
    final long edgeTimestamp = markedEdge.getTimestamp();

    /**
     * Source write
     */
    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNode( sourceNode, edgeType );

    final Collection<Shard> sourceWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceEdgeMeta ).getWriteShards( now );

    final MutationBatch batch = shardedEdgeSerialization
            .deleteEdgeFromSource( edgeColumnFamilies, scope, markedEdge, sourceWriteShards, sourceEdgeMeta,
                    timestamp );


    /**
     * Source with target  type write
     */
    final DirectedEdgeMeta sourceTargetTypeEdgeMeta =
            DirectedEdgeMeta.fromSourceNodeTargetType( sourceNode, edgeType, targetNode.getType() );

    final Collection<Shard> sourceTargetTypeWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceTargetTypeEdgeMeta )
                             .getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .deleteEdgeFromSourceWithTargetType( edgeColumnFamilies, scope, markedEdge, sourceTargetTypeWriteShards,
                    sourceTargetTypeEdgeMeta, timestamp ) );


    /**
     * Target write
     *
     */

    final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNode( targetNode, edgeType );

    final Collection<Shard> targetWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetEdgeMeta ).getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .deleteEdgeToTarget( edgeColumnFamilies, scope, markedEdge, targetWriteShards, targetEdgeMeta,
                    timestamp ) );


    /**
     * Target with source type write
     */

    final DirectedEdgeMeta targetSourceTypeEdgeMeta =
            DirectedEdgeMeta.fromTargetNodeSourceType( targetNode, edgeType, sourceNode.getType() );

    final Collection<Shard> targetSourceTypeWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetSourceTypeEdgeMeta )
                             .getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .deleteEdgeToTargetWithSourceType( edgeColumnFamilies, scope, markedEdge, targetSourceTypeWriteShards,
                    targetSourceTypeEdgeMeta, timestamp ) );


    /**
     * Version write
     */

    final DirectedEdgeMeta edgeVersionsMeta = DirectedEdgeMeta.fromEdge( sourceNode, targetNode, edgeType );

    final Collection<Shard> edgeVersionsShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, edgeVersionsMeta ).getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .deleteEdgeVersions( edgeColumnFamilies, scope, markedEdge, edgeVersionsShards,
                    edgeVersionsMeta, timestamp ) );


    return batch;
}
 
Example 13
Source File: EdgeSerializationImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch writeEdge( final ApplicationScope scope, final MarkedEdge markedEdge, final UUID timestamp ) {

    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateEdge( markedEdge );
    ValidationUtils.verifyTimeUuid( timestamp, "timestamp" );

    final long now = timeService.getCurrentTime();
    final Id sourceNode = markedEdge.getSourceNode();
    final Id targetNode = markedEdge.getTargetNode();
    final String edgeType = markedEdge.getType();
    final long edgeTimestamp = markedEdge.getTimestamp();

    /**
     * Source write
     */
    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNode( sourceNode, edgeType );

    final Collection<Shard> sourceWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceEdgeMeta ).getWriteShards( now );

    final MutationBatch batch = shardedEdgeSerialization
            .writeEdgeFromSource( edgeColumnFamilies, scope, markedEdge, sourceWriteShards, sourceEdgeMeta,
                    timestamp );


    /**
     * Source with target  type write
     */
    final DirectedEdgeMeta sourceTargetTypeEdgeMeta =
            DirectedEdgeMeta.fromSourceNodeTargetType( sourceNode, edgeType, targetNode.getType() );

    final Collection<Shard> sourceTargetTypeWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, sourceTargetTypeEdgeMeta )
                             .getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .writeEdgeFromSourceWithTargetType( edgeColumnFamilies, scope, markedEdge, sourceTargetTypeWriteShards,
                    sourceTargetTypeEdgeMeta, timestamp ) );


    /**
     * Target write
     *
     */

    final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNode( targetNode, edgeType );

    final Collection<Shard> targetWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetEdgeMeta ).getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .writeEdgeToTarget( edgeColumnFamilies, scope, markedEdge, targetWriteShards, targetEdgeMeta,
                    timestamp ) );


    /**
     * Target with source type write
     */

    final DirectedEdgeMeta targetSourceTypeEdgeMeta =
            DirectedEdgeMeta.fromTargetNodeSourceType( targetNode, edgeType, sourceNode.getType() );

    final Collection<Shard> targetSourceTypeWriteShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, targetSourceTypeEdgeMeta )
                             .getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .writeEdgeToTargetWithSourceType( edgeColumnFamilies, scope, markedEdge, targetSourceTypeWriteShards,
                    targetSourceTypeEdgeMeta, timestamp ) );


    /**
     * Version write
     */

    final DirectedEdgeMeta edgeVersionsMeta = DirectedEdgeMeta.fromEdge( sourceNode, targetNode, edgeType );

    final Collection<Shard> edgeVersionsShards =
            edgeShardStrategy.getWriteShards( scope, edgeTimestamp, edgeVersionsMeta ).getWriteShards( now );

    batch.mergeShallow( shardedEdgeSerialization
            .writeEdgeVersions( edgeColumnFamilies, scope, markedEdge, edgeVersionsShards,
                    edgeVersionsMeta, timestamp ) );


    return batch;
}
 
Example 14
Source File: WriteCommit.java    From usergrid with Apache License 2.0
private CollectionIoEvent<MvccEntity> confirmUniqueFields(CollectionIoEvent<MvccEntity> ioEvent) {
    final MvccEntity mvccEntity = ioEvent.getEvent();
    MvccValidationUtils.verifyMvccEntityWithEntity( mvccEntity );

    final Id entityId = mvccEntity.getId();
    final UUID version = mvccEntity.getVersion();
    final ApplicationScope applicationScope = ioEvent.getEntityCollection();

    //set the version into the entity
    final Entity entity = mvccEntity.getEntity().get();

    EntityUtils.setVersion( entity, version );

    MvccValidationUtils.verifyMvccEntityWithEntity( ioEvent.getEvent() );
    ValidationUtils.verifyTimeUuid( version ,"version" );

    final MvccLogEntry startEntry =
        new MvccLogEntryImpl( entityId, version, Stage.COMMITTED, MvccLogEntry.State.COMPLETE );



    MutationBatch logMutation = logEntryStrat.write( applicationScope, startEntry );

    // now get our actual insert into the entity data
    MutationBatch entityMutation = entityStrat.write( applicationScope, mvccEntity );

    // merge the 2 into 1 mutation
    logMutation.mergeShallow( entityMutation );

    // actorSystemFig may be null when this is called from JUnit tests
    if ( actorSystemFig != null && actorSystemFig.getEnabled() && uniqueValuesFig.getUnqiueValueViaCluster() ) {
        String authoritativeRegion = ioEvent.getAuthoritativeRegion();
        if ( StringUtils.isEmpty(authoritativeRegion) ) {
            authoritativeRegion = uniqueValuesFig.getAuthoritativeRegion();
        }
        if ( StringUtils.isEmpty(authoritativeRegion) ) {
            authoritativeRegion = actorSystemFig.getRegionLocal();
        }
        confirmUniqueFieldsAkka( mvccEntity, version, applicationScope, authoritativeRegion );
    } else {
        confirmUniqueFields( mvccEntity, version, applicationScope, logMutation );
    }

    try {
        logMutation.execute();
    }
    catch ( ConnectionException e ) {
        logger.error( "Failed to execute write asynchronously ", e );
        throw new WriteCommitException( mvccEntity, applicationScope,
            "Failed to execute write asynchronously ", e );
    }

    return ioEvent;
}
 
Example 15
Source File: MvccEntitySerializationStrategyProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch delete( final ApplicationScope context, final Id entityId, final UUID version ) {

    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.delete( context, entityId, version ) );
        aggregateBatch.mergeShallow( migration.to.delete( context, entityId, version ) );

        return aggregateBatch;
    }

    return migration.to.delete( context, entityId, version );
}
 
Example 16
Source File: MvccEntitySerializationStrategyProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch mark( final ApplicationScope context, final Id entityId, final UUID version ) {

    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.mark( context, entityId, version ) );
        aggregateBatch.mergeShallow( migration.to.mark( context, entityId, version ) );

        return aggregateBatch;
    }

    return migration.to.mark( context, entityId, version );
}
 
Example 17
Source File: MvccEntitySerializationStrategyProxyImpl.java    From usergrid with Apache License 2.0
@Override
public MutationBatch write( final ApplicationScope context, final MvccEntity entity ) {

    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();

        aggregateBatch.mergeShallow( migration.from.write( context, entity ) );
        aggregateBatch.mergeShallow( migration.to.write( context, entity ) );

        return aggregateBatch;
    }

    return migration.to.write( context, entity );
}
 
Example 18
Source File: EdgeSerializationTest.java    From usergrid with Apache License 2.0
/**
 * Tests paging over edges. We write several pages' worth of edges between the same
 * source and target, then verify every query path returns all of them
 */
@Test
@Category(StressTest.class)
public void testIteratorPaging() throws ConnectionException {


    final Id sourceId = IdGenerator.createId( "source" );
    final String edgeType = "edge";
    final Id targetId = IdGenerator.createId( "target" );


    int writeCount = graphFig.getScanPageSize() * 3;


    final MutationBatch batch = keyspace.prepareMutationBatch();

    long timestamp = 10000L;

    for ( int i = 0; i < writeCount; i++ ) {

        final MarkedEdge edge = createEdge( sourceId, edgeType, targetId, timestamp );

        batch.mergeShallow( serialization.writeEdge( scope, edge, UUIDGenerator.newTimeUUID() ) );

        // Increment the timestamp (not done inline on purpose): if we used
        // System.currentTimeMillis() we could get the same edge timestamp on fast systems
        timestamp++;
    }

    logger.info( "Flushing edges" );
    batch.execute();


    Iterator<MarkedEdge> results = serialization
            .getEdgeVersions( scope, createGetByEdge( sourceId, edgeType, targetId, timestamp, null ) );

    verify( results, writeCount );


    //get them all from source
    results = serialization.getEdgesFromSource( scope, createSearchByEdge( sourceId, edgeType, timestamp, null ) );

    verify( results, writeCount );


    results = serialization.getEdgesFromSourceByTargetType( scope,
            createSearchByEdgeAndId( sourceId, edgeType, timestamp, targetId.getType(), null ) );

    verify( results, writeCount );


    results = serialization.getEdgesToTarget( scope, createSearchByEdge( targetId, edgeType, timestamp, null ) );

    verify( results, writeCount );


    results = serialization.getEdgesToTargetBySourceType( scope,
            createSearchByEdgeAndId( targetId, edgeType, timestamp, sourceId.getType(), null ) );

    verify( results, writeCount );
}
 
Example 19
Source File: EdgeShardSerializationTest.java    From usergrid with Apache License 2.0
@Test
public void saveReturn() throws ConnectionException {

    final Id now = IdGenerator.createId( "test" );

    final long timestamp = 10000L;

    final Shard shard1 = new Shard( 1000L, timestamp, false );

    final Shard shard2 = new Shard( shard1.getShardIndex() * 2, timestamp, true );

    final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp, false );

    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( now, "edgeType", "subType" );

    MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, shard1, sourceEdgeMeta );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard2, sourceEdgeMeta ) );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard3, sourceEdgeMeta ) );

    batch.execute();


    Iterator<Shard> results =
            edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );


    assertEquals( shard3, results.next() );

    assertEquals( shard2, results.next() );

    assertEquals( shard1, results.next() );


    assertFalse( results.hasNext() );

    final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNodeSourceType( now, "edgeType", "subType" );

    //test we get nothing with the other node type
    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), targetEdgeMeta );

    assertFalse( results.hasNext() );


    //test paging and size
    results = edgeShardSerialization.getShardMetaData( scope, Optional.of( shard2 ), sourceEdgeMeta );

    assertEquals( shard2, results.next() );


    assertEquals( shard1, results.next() );


    assertFalse( results.hasNext() );
}
 
Example 20
Source File: EdgeShardSerializationTest.java    From usergrid with Apache License 2.0
@Test
public void saveReturnDelete() throws ConnectionException {

    final Id now = IdGenerator.createId( "test" );


    final long timestamp = 10000L;

    final Shard shard1 = new Shard( 1000L, timestamp, false );

    final Shard shard2 = new Shard( shard1.getShardIndex() * 2, timestamp, true );

    final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp, false );


    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( now, "edgeType", "subType" );


    MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, shard1, sourceEdgeMeta );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard2, sourceEdgeMeta ) );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard3, sourceEdgeMeta ) );

    batch.execute();


    Iterator<Shard> results =
            edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    assertEquals( shard3, results.next() );

    assertEquals( shard2, results.next() );

    assertEquals( shard1, results.next() );

    assertFalse( results.hasNext() );

    //test nothing with other type

    final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNodeSourceType( now, "edgeType", "subType" );

    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), targetEdgeMeta );

    assertFalse( results.hasNext() );


    //test removal of shard meta
    edgeShardSerialization.removeShardMeta( scope, shard1, sourceEdgeMeta ).execute();

    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    assertEquals( shard3, results.next() );

    assertEquals( shard2, results.next() );

    assertFalse( results.hasNext() );


    edgeShardSerialization.removeShardMeta( scope, shard2, sourceEdgeMeta ).execute();

    edgeShardSerialization.removeShardMeta( scope, shard3, sourceEdgeMeta ).execute();

    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );


    assertFalse( results.hasNext() );
}