Java Code Examples for com.netflix.astyanax.MutationBatch#execute()

The following examples show how to use com.netflix.astyanax.MutationBatch#execute(). Each example is taken from an open source project; you can find the original source file, project, and license noted above each snippet.
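Before the project-specific examples, here is a minimal sketch of the common pattern: prepare a MutationBatch from a Keyspace, stage one or more row mutations, then call execute() and handle ConnectionException. The Keyspace wiring and the CF_USERS column family are hypothetical placeholders for illustration and do not come from any of the projects below.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.StringSerializer;

public class MutationBatchSketch {

    // Hypothetical column family with String row keys and String column names
    private static final ColumnFamily<String, String> CF_USERS =
            ColumnFamily.newColumnFamily("Users",
                    StringSerializer.get(), StringSerializer.get());

    private final Keyspace keyspace;

    public MutationBatchSketch(Keyspace keyspace) {
        this.keyspace = keyspace;
    }

    public void saveUser(String userId, String name, String email) {
        // Mutations are buffered client-side; nothing is sent until execute() is called
        MutationBatch batch = keyspace.prepareMutationBatch();
        batch.withRow(CF_USERS, userId)
             .putColumn("name", name)
             .putColumn("email", email);

        try {
            // execute() sends the whole batch in one call and throws on connection problems
            OperationResult<Void> result = batch.execute();
        } catch (ConnectionException e) {
            throw new RuntimeException("Failed to write user " + userId, e);
        }
    }
}

Because mutations are buffered locally, batches produced by different components can be combined before a single round trip; several of the examples below (Examples 14, 15, and 17 through 20) do this with mergeShallow() followed by one execute().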
Example 1
Source File: V003.java    From mutagen-cassandra with Apache License 2.0
@Override
protected void performMutation(Context context) {
	context.debug("Executing mutation {}",state.getID());
	final ColumnFamily<String,String> CF_TEST1=
		ColumnFamily.newColumnFamily("Test1",
			StringSerializer.get(),StringSerializer.get());

	MutationBatch batch=getKeyspace().prepareMutationBatch();
	batch.withRow(CF_TEST1,"row2")
		.putColumn("value1","chicken")
		.putColumn("value2","sneeze");

	try {
		batch.execute();
	}
	catch (ConnectionException e) {
		throw new MutagenException("Could not update columnfamily Test1",e);
	}
}
 
Example 2
Source File: SimpleReverseIndexer.java    From staash with Apache License 2.0
@Override
    public void tagId(String id, Map<String, String> tags) throws IndexerException  {
        MutationBatch mb = keyspace.prepareMutationBatch();
        
        ColumnListMutation<String> idRow = mb.withRow(dataCf, id);
        UUID uuid = TimeUUIDUtils.getUniqueTimeUUIDinMicros();
        for (Map.Entry<String, String> tag : tags.entrySet()) {
            String rowkey = tag.getKey() + "=" + tag.getValue();
            System.out.println("Rowkey: " + rowkey);
            mb.withRow(indexCf, tag.getKey() + "=" + tag.getValue())
              .putEmptyColumn(new IndexEntry(id, uuid));
//            idRow.putColumn(tag.getKey(), tag.getValue());
        }
        
        try {
            mb.execute();
        } catch (ConnectionException e) {
            throw new IndexerException("Failed to store tags : " + tags + " for id " + id, e);
        }
    }
 
Example 3
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public void deleteRow(String key) throws PaasException {
    invariant();
    
    MutationBatch mb = keyspace.prepareMutationBatch();
    mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key)).delete();
    
    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'" , 
                              key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
 
Example 4
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
public void updateRow(String key, RowData rowData) throws PaasException {
    LOG.info("Update row: " + rowData.toString());
    invariant();
    
    MutationBatch mb = keyspace.prepareMutationBatch();
    if (rowData.hasSchemalessRows()) {
        ColumnListMutation<ByteBuffer> mbRow = mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key));
        for (Entry<String, Map<String, String>> row : rowData.getSrows().getRows().entrySet()) {
            for (Entry<String, String> column : row.getValue().entrySet()) {
                mbRow.putColumn(serializers.columnAsByteBuffer(column.getKey()),  
                                serializers.valueAsByteBuffer(column.getKey(), column.getValue()));
            }
        }
    }
    
    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'" , 
                              key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
 
Example 5
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public void updateColumn(String key, String column, String value) throws NotFoundException, PaasException {
    LOG.info("Update row");
    invariant();
    
    MutationBatch mb = keyspace.prepareMutationBatch();
    ColumnListMutation<ByteBuffer> mbRow = mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key));
    mbRow.putColumn(serializers.columnAsByteBuffer(column),  
                    serializers.valueAsByteBuffer(column, value));
    
    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'" , 
                              key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
 
Example 6
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public void deleteColumn(String key, String column) throws PaasException {
    LOG.info("Update row");
    invariant();
    
    MutationBatch mb = keyspace.prepareMutationBatch();
    ColumnListMutation<ByteBuffer> mbRow = mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key));
    mbRow.deleteColumn(serializers.columnAsByteBuffer(column));
    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'" , 
                              key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
 
Example 7
Source File: ALocatorIO.java    From blueflood with Apache License 2.0
/**
 * Insert a locator with key = shard long value calculated using Util.getShard()
 * @param locator
 * @throws IOException
 */
@Override
public void insertLocator(Locator locator) throws IOException {
    Timer.Context timer = Instrumentation.getWriteTimerContext(CassandraModel.CF_METRICS_LOCATOR_NAME);
    try {
        MutationBatch mutationBatch = AstyanaxIO.getKeyspace().prepareMutationBatch();
        AstyanaxWriter.getInstance().insertLocator(locator, mutationBatch);
        mutationBatch.execute();
    } catch (Exception e) {
        throw new IOException(e);
    } finally {
        timer.stop();
    }
}
 
Example 8
Source File: AShardStateIO.java    From blueflood with Apache License 2.0
@Override
public void putShardState(int shard, Map<Granularity, Map<Integer, UpdateStamp>> slotTimes) throws IOException {
    AstyanaxIO astyanaxIO = AstyanaxIO.singleton();
    Timer.Context ctx = Instrumentation.getWriteTimerContext(CassandraModel.CF_METRICS_STATE_NAME);
    try {
        MutationBatch mutationBatch = astyanaxIO.getKeyspace().prepareMutationBatch();
        ColumnListMutation<SlotState> mutation = mutationBatch.withRow(CassandraModel.CF_METRICS_STATE, (long)shard);
        for (Map.Entry<Granularity, Map<Integer, UpdateStamp>> granEntry : slotTimes.entrySet()) {
            Granularity g = granEntry.getKey();
            for (Map.Entry<Integer, UpdateStamp> entry : granEntry.getValue().entrySet()) {
                // granularity,slot,state
                SlotState slotState = new SlotState(g, entry.getKey(), entry.getValue().getState());
                mutation.putColumn(slotState, entry.getValue().getTimestamp());
                /*
                  Note: this method used to set the timestamp of the Cassandra column to entry.getValue().getTimestamp() * 1000, i.e. the collection time.
                  That implementation was changed because it could cause delayed metrics not to roll up.
                  Consider out-of-order metrics M1 and M2, with collection times T1 and T2 where T2 > T1, belonging to the same slot.
                  Assume M2 arrives first: the slot gets marked active, rolled up, and its state set to Rolled. Now assume M1 arrives. We update the slot state to active and
                  set the slot timestamp to T1, and while persisting we set the column timestamp to T1*1000; but because T1 < T2, Cassandra wouldn't update the column.
                 */
            }
        }
        if (!mutationBatch.isEmpty())
            try {
                mutationBatch.execute();
            } catch (ConnectionException e) {
                Instrumentation.markWriteError(e);
                LOG.error("Error persisting shard state", e);
                throw new IOException(e);
            }
    } finally {
        ctx.stop();
    }
}
 
Example 9
Source File: AstyanaxWriter.java    From blueflood with Apache License 2.0
public void insertRollups(List<SingleRollupWriteContext> writeContexts) throws ConnectionException {
    if (writeContexts.size() == 0) {
        return;
    }
    Timer.Context ctx = Instrumentation.getBatchWriteTimerContext(writeContexts.get(0).getDestinationCF().getName());
    MutationBatch mb = keyspace.prepareMutationBatch();
    for (SingleRollupWriteContext writeContext : writeContexts) {
        Rollup rollup = writeContext.getRollup();
        int ttl = (int)TTL_PROVIDER.getTTL(
                writeContext.getLocator().getTenantId(),
                writeContext.getGranularity(),
                writeContext.getRollup().getRollupType()).get().toSeconds();
        AbstractSerializer serializer = Serializers.serializerFor(rollup.getClass());
        try {
            mb.withRow(writeContext.getDestinationCF(), writeContext.getLocator())
                    .putColumn(writeContext.getTimestamp(),
                            rollup,
                            serializer,
                            ttl);
        } catch (RuntimeException ex) {
            // let's not let stupidness prevent the rest of this put.
            log.warn(String.format("Cannot save %s", writeContext.getLocator().toString()), ex);
        }
    }
    try {
        mb.execute();
    } catch (ConnectionException e) {
        Instrumentation.markWriteError(e);
        log.error("Error writing rollup batch", e);
        throw e;
    } finally {
        ctx.stop();
    }
}
 
Example 10
Source File: MvccEntityDataMigrationImpl.java    From usergrid with Apache License 2.0
protected void executeBatch(final int targetVersion, final MutationBatch batch, final ProgressObserver po,
                            final AtomicLong count, com.datastax.driver.core.BatchStatement uniqueBatch) {
    try {

        batch.execute();
        session.execute(uniqueBatch);


        po.update( targetVersion, "Finished copying " + count + " entities to the new format" );
    }
    catch ( ConnectionException e ) {
        po.failed( targetVersion, "Failed to execute mutation in cassandra" );
        throw new DataMigrationException( "Unable to migrate batches ", e );
    }
}
 
Example 11
Source File: NodeShardAllocationImpl.java    From usergrid with Apache License 2.0
@Override
public Iterator<ShardEntryGroup> getShards(final ApplicationScope scope,
                                           final DirectedEdgeMeta directedEdgeMeta) {

    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateDirectedEdgeMeta( directedEdgeMeta );

    Iterator<Shard> existingShards;

    //its a new node, it doesn't need to check cassandra, it won't exist
    if ( isNewNode( directedEdgeMeta ) ) {
        existingShards = Collections.singleton( Shard.MIN_SHARD ).iterator();
    }

    else {
        existingShards = edgeShardSerialization.getShardMetaData( scope, Optional.absent(), directedEdgeMeta );

        /**
         * We didn't get anything out of cassandra, so we need to create the minimum shard
         */
        if ( existingShards == null || !existingShards.hasNext() ) {

            final MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, Shard.MIN_SHARD, directedEdgeMeta );
            try {
                batch.execute();
            }
            catch ( ConnectionException e ) {
                throw new RuntimeException( "Unable to connect to casandra", e );
            }

            existingShards = Collections.singleton( Shard.MIN_SHARD ).iterator();
        }
    }

    return new ShardEntryGroupIterator( existingShards, graphFig.getShardMinDelta(), shardGroupCompaction, scope,
        directedEdgeMeta );
}
 
Example 12
Source File: InstanceDataDAOCassandra.java    From Raigad with Apache License 2.0
private void getLock(RaigadInstance instance) throws Exception {
    String choosingkey = getChoosingKey(instance);
    MutationBatch m = bootKeyspace.prepareMutationBatch();
    ColumnListMutation<String> clm = m.withRow(CF_LOCKS, choosingkey);

    // Expire in 6 sec
    clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), new Integer(6));
    m.execute();
    int count = bootKeyspace.prepareQuery(CF_LOCKS).getKey(choosingkey).getCount().execute().getResult();
    if (count > 1) {
        // Need to delete my entry
        m.withRow(CF_LOCKS, choosingkey).deleteColumn(instance.getInstanceId());
        m.execute();
        throw new Exception(String.format("More than 1 contender for lock %s %d", choosingkey, count));
    }

    String lockKey = getLockingKey(instance);
    OperationResult<ColumnList<String>> result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
    if (result.getResult().size() > 0 && !result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
        throw new Exception(String.format("Lock already taken %s", lockKey));
    }

    clm = m.withRow(CF_LOCKS, lockKey);
    clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), new Integer(600));
    m.execute();
    Thread.sleep(100);
    result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
    if (result.getResult().size() == 1 && result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
        logger.info("Got lock " + lockKey);
        return;
    }
    else {
        throw new Exception(String.format("Cannot insert lock %s", lockKey));
    }
}
 
Example 13
Source File: WriteCommit.java    From usergrid with Apache License 2.0
private void confirmUniqueFields(
    MvccEntity mvccEntity, UUID version, ApplicationScope scope, MutationBatch logMutation) {

    final Entity entity = mvccEntity.getEntity().get();

    // re-write the unique values but this time with no TTL
    final BatchStatement uniqueBatch = new BatchStatement();

    for ( Field field : EntityUtils.getUniqueFields(mvccEntity.getEntity().get()) ) {

            UniqueValue written  = new UniqueValueImpl( field, entity.getId(), version);

            uniqueBatch.add(uniqueValueStrat.writeCQL(scope,  written, -1 ));

            logger.debug("Finalizing {} unique value {}", field.getName(), field.getValue().toString());


    }

    try {
        logMutation.execute();
        session.execute(uniqueBatch);
    }
    catch ( ConnectionException e ) {
        logger.error( "Failed to execute write asynchronously ", e );
        throw new WriteCommitException( mvccEntity, scope,
            "Failed to execute write asynchronously ", e );
    }
}
 
Example 14
Source File: MarkCommit.java    From usergrid with Apache License 2.0
@Override
    public void call( final CollectionIoEvent<MvccEntity> idIoEvent ) {

        final MvccEntity entity = idIoEvent.getEvent();

        MvccValidationUtils.verifyMvccEntityOptionalEntity( entity );

        final Id entityId = entity.getId();
        final UUID version = entity.getVersion();


        final ApplicationScope applicationScope = idIoEvent.getEntityCollection();


        logger.debug("Inserting tombstone for entity {} at version {}", entityId, version );

        final MvccLogEntry startEntry =
                new MvccLogEntryImpl( entityId, version, Stage.COMMITTED, MvccLogEntry.State.DELETED );

        final MutationBatch entityStateBatch = logStrat.write( applicationScope, startEntry );

        //insert a "cleared" value into the versions.  Post processing should actually delete

        try {
            final MutationBatch entityBatch = entityStrat.mark( applicationScope, entityId, version );
            entityStateBatch.mergeShallow( entityBatch );
            entityStateBatch.execute();
        }
        catch ( ConnectionException e ) {
            throw new RuntimeException( "Unable to mark entry as deleted" );
        }

        // TODO: do we need this or can we rely on UniqueCleanup + Cassandra replication?
//
//        // actorSystemFig may be null in testing
//        if ( actorSystemFig != null && actorSystemFig.getEnabled() ) {
//
//            String region = idIoEvent.getAuthoritativeRegion();
//            if ( region == null ) {
//                region = uniqueValuesFig.getAuthoritativeRegion();
//            }
//            if ( region == null ) {
//                region = actorSystemFig.getRegionLocal();
//            }
//
//            try {
//                uniqueValuesService.releaseUniqueValues( applicationScope, entityId, version, region );
//            } catch (UniqueValueException e) {
//                throw new RuntimeException( "Unable to release unique values for entity " + entityId );
//            }
//        }
    }
 
Example 15
Source File: WriteCommit.java    From usergrid with Apache License 2.0
private CollectionIoEvent<MvccEntity> confirmUniqueFields(CollectionIoEvent<MvccEntity> ioEvent) {
    final MvccEntity mvccEntity = ioEvent.getEvent();
    MvccValidationUtils.verifyMvccEntityWithEntity( mvccEntity );

    final Id entityId = mvccEntity.getId();
    final UUID version = mvccEntity.getVersion();
    final ApplicationScope applicationScope = ioEvent.getEntityCollection();

    //set the version into the entity
    final Entity entity = mvccEntity.getEntity().get();

    EntityUtils.setVersion( entity, version );

    MvccValidationUtils.verifyMvccEntityWithEntity( ioEvent.getEvent() );
    ValidationUtils.verifyTimeUuid( version ,"version" );

    final MvccLogEntry startEntry =
        new MvccLogEntryImpl( entityId, version, Stage.COMMITTED, MvccLogEntry.State.COMPLETE );



    MutationBatch logMutation = logEntryStrat.write( applicationScope, startEntry );

    // now get our actual insert into the entity data
    MutationBatch entityMutation = entityStrat.write( applicationScope, mvccEntity );

    // merge the 2 into 1 mutation
    logMutation.mergeShallow( entityMutation );

    // akkaFig may be null when this is called from JUnit tests
    if ( actorSystemFig != null && actorSystemFig.getEnabled() && uniqueValuesFig.getUnqiueValueViaCluster() ) {
        String authoritativeRegion = ioEvent.getAuthoritativeRegion();
        if ( StringUtils.isEmpty(authoritativeRegion) ) {
            authoritativeRegion = uniqueValuesFig.getAuthoritativeRegion();
        }
        if ( StringUtils.isEmpty(authoritativeRegion) ) {
            authoritativeRegion = actorSystemFig.getRegionLocal();
        }
        confirmUniqueFieldsAkka( mvccEntity, version, applicationScope, authoritativeRegion );
    } else {
        confirmUniqueFields( mvccEntity, version, applicationScope, logMutation );
    }

    try {
        logMutation.execute();
    }
    catch ( ConnectionException e ) {
        logger.error( "Failed to execute write asynchronously ", e );
        throw new WriteCommitException( mvccEntity, applicationScope,
            "Failed to execute write asynchronously ", e );
    }

    return ioEvent;
}
 
Example 16
Source File: AbstractCassandraMutation.java    From mutagen-cassandra with Apache License 2.0
/**
	 * Performs the actual mutation and then updates the recorded schema version
	 *
	 */
	@Override
	public final void mutate(Context context)
			throws MutagenException {

		// Perform the mutation
		performMutation(context);

		int version=getResultingState().getID();

		String change=getChangeSummary();
		if (change==null) {
			change="";
		}

		String changeHash=md5String(change);

		// The straightforward way, without locking
		try {
			MutationBatch batch=getKeyspace().prepareMutationBatch();
			batch
				.withRow(CassandraSubject.VERSION_CF,
					CassandraSubject.ROW_KEY)
				.putColumn(CassandraSubject.VERSION_COLUMN,version);

			batch
				.withRow(CassandraSubject.VERSION_CF,
					String.format("%08d",version))
				.putColumn("change",change)
				.putColumn("hash",changeHash);

			batch.execute();
		}
		catch (ConnectionException e) {
			throw new MutagenException("Could not update \"schema_version\" "+
				"column family to state "+version+
				"; schema is now out of sync with recorded version",e);
		}

// TAF: Why does this fail with a StaleLockException? Do we need to use a
// separate lock table?

//		// Attempt to acquire a lock to update the version
//		ColumnPrefixDistributedRowLock<String> lock =
//			new ColumnPrefixDistributedRowLock<String>(getKeyspace(),
//					CassandraSubject.VERSION_CF,CassandraSubject.VERSION_COLUMN)
//				.withBackoff(new BoundedExponentialBackoff(250, 10000, 10))
//				.expireLockAfter(1, TimeUnit.SECONDS)
////				.failOnStaleLock(false);
//				.failOnStaleLock(true);
//
//		try {
//			lock.acquire();
//		}
//		catch (StaleLockException e) {
//			// Won't happen
//			throw new MutagenException("Could not update "+
//				"\"schema_version\" column family to state "+version+
//				" because lock expired",e);
//		}
//		catch (BusyLockException e) {
//			throw new MutagenException("Could not update "+
//				"\"schema_version\" column family to state "+version+
//				" because another client is updating the recorded version",e);
//		}
//		catch (Exception e) {
//			if (e instanceof RuntimeException) {
//				throw (RuntimeException)e;
//			}
//			else {
//				throw new MutagenException("Could not update "+
//					"\"schema_version\" column family to state "+version+
//					" because a write lock could not be obtained",e);
//			}
//		}
//		finally {
//			try {
//				MutationBatch batch=getKeyspace().prepareMutationBatch();
//				batch.withRow(CassandraSubject.VERSION_CF,
//						CassandraSubject.ROW_KEY)
//					.putColumn(CassandraSubject.VERSION_COLUMN,version);
//
//				// Release and update
//				lock.releaseWithMutation(batch);
//			}
//			catch (Exception e) {
//				if (e instanceof RuntimeException) {
//					throw (RuntimeException)e;
//				}
//				else {
//					throw new MutagenException("Could not update "+
//						"\"schema_version\" column family to state "+version+
//						"; schema is now out of sync with recorded version",e);
//				}
//			}
//		}
	}
 
Example 17
Source File: EdgeShardSerializationTest.java    From usergrid with Apache License 2.0
@Test
public void testShardDelete() throws ConnectionException {

    final Id now = IdGenerator.createId( "test" );

    final long timestamp = 2000L;

    final Shard shard1 = new Shard( 1000L, timestamp, false );
    final Shard shard2 = new Shard( shard1.getShardIndex(), timestamp * 2, true );
    final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp * 3, true );



    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( now, "edgeType", "subType" );
    MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, shard1, sourceEdgeMeta );
    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard2, sourceEdgeMeta ) );
    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard3, sourceEdgeMeta ) );

    batch.execute();


    Iterator<Shard> results =
        edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    // Latest timestamp  comes first
    assertEquals( shard3, results.next() );

    // This should now not remove anything
    edgeShardSerialization.removeShardMeta( scope, shard3, sourceEdgeMeta ).execute();


    // Get iterator again
    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    // We should still have shard2 stored
    assertEquals( shard2, results.next() );



}
 
Example 18
Source File: EdgeSerializationTest.java    From usergrid with Apache License 2.0
/**
 * Tests mixing 2 edge types between 2 nodes.  We should get results for the same source->destination with the 2
 * edge types
 */
@Test
@Category(StressTest.class)
public void testIteratorPaging() throws ConnectionException {


    final Id sourceId = IdGenerator.createId( "source" );
    final String edgeType = "edge";
    final Id targetId = IdGenerator.createId( "target" );


    int writeCount = graphFig.getScanPageSize() * 3;


    final MutationBatch batch = keyspace.prepareMutationBatch();

    long timestamp = 10000l;

    for ( int i = 0; i < writeCount; i++ ) {

        final MarkedEdge edge = createEdge( sourceId, edgeType, targetId, timestamp );

        batch.mergeShallow( serialization.writeEdge( scope, edge, UUIDGenerator.newTimeUUID() ) );

        //increment timestamp (not done inline on purpose) If we do System.currentMillis we get the same edge on
        // fast systems
        timestamp++;
    }

    logger.info( "Flushing edges" );
    batch.execute();


    Iterator<MarkedEdge> results = serialization
            .getEdgeVersions( scope, createGetByEdge( sourceId, edgeType, targetId, timestamp, null ) );

    verify( results, writeCount );


    //get them all from source
    results = serialization.getEdgesFromSource( scope, createSearchByEdge( sourceId, edgeType, timestamp, null ) );

    verify( results, writeCount );


    results = serialization.getEdgesFromSourceByTargetType( scope,
            createSearchByEdgeAndId( sourceId, edgeType, timestamp, targetId.getType(), null ) );

    verify( results, writeCount );


    results = serialization.getEdgesToTarget( scope, createSearchByEdge( targetId, edgeType, timestamp, null ) );

    verify( results, writeCount );


    results = serialization.getEdgesToTargetBySourceType( scope,
            createSearchByEdgeAndId( targetId, edgeType, timestamp, sourceId.getType(), null ) );

    verify( results, writeCount );
}
 
Example 19
Source File: EdgeShardSerializationTest.java    From usergrid with Apache License 2.0
@Test
public void saveReturnDelete() throws ConnectionException {

    final Id now = IdGenerator.createId( "test" );


    final long timestamp = 10000l;

    final Shard shard1 = new Shard( 1000l, timestamp, false );

    final Shard shard2 = new Shard( shard1.getShardIndex() * 2, timestamp, true );

    final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp, false );


    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( now, "edgeType", "subType" );


    MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, shard1, sourceEdgeMeta );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard2, sourceEdgeMeta ) );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard3, sourceEdgeMeta ) );

    batch.execute();


    Iterator<Shard> results =
            edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    assertEquals( shard3, results.next() );

    assertEquals( shard2, results.next() );

    assertEquals( shard1, results.next() );

    assertFalse( results.hasNext() );

    //test nothing with other type

    final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNodeSourceType( now, "edgeType", "subType" );

    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), targetEdgeMeta );

    assertFalse( results.hasNext() );


    //test paging and size
    edgeShardSerialization.removeShardMeta( scope, shard1, sourceEdgeMeta ).execute();

    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    assertEquals( shard3, results.next() );

    assertEquals( shard2, results.next() );

    assertFalse( results.hasNext() );


    edgeShardSerialization.removeShardMeta( scope, shard2, sourceEdgeMeta ).execute();

    edgeShardSerialization.removeShardMeta( scope, shard3, sourceEdgeMeta ).execute();

    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );


    assertFalse( results.hasNext() );
}
 
Example 20
Source File: EdgeShardSerializationTest.java    From usergrid with Apache License 2.0
@Test
public void saveReturn() throws ConnectionException {

    final Id now = IdGenerator.createId( "test" );

    final long timestamp = 10000l;

    final Shard shard1 = new Shard( 1000l, timestamp, false );

    final Shard shard2 = new Shard( shard1.getShardIndex() * 2, timestamp, true );

    final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp, false );

    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( now, "edgeType", "subType" );

    MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, shard1, sourceEdgeMeta );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard2, sourceEdgeMeta ) );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard3, sourceEdgeMeta ) );

    batch.execute();


    Iterator<Shard> results =
            edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );


    assertEquals( shard3, results.next() );

    assertEquals( shard2, results.next() );

    assertEquals( shard1, results.next() );


    assertFalse( results.hasNext() );

    final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNodeSourceType( now, "edgeType", "subType" );

    //test we get nothing with the other node type
    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), targetEdgeMeta );

    assertFalse( results.hasNext() );


    //test paging and size
    results = edgeShardSerialization.getShardMetaData( scope, Optional.of( shard2 ), sourceEdgeMeta );

    assertEquals( shard2, results.next() );


    assertEquals( shard1, results.next() );


    assertFalse( results.hasNext() );
}