com.netflix.astyanax.util.RangeBuilder Java Examples

The following examples show how to use com.netflix.astyanax.util.RangeBuilder. They are extracted from open-source projects; each example notes the source file and the project it was taken from.
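A RangeBuilder constructs the ByteBufferRange passed to withColumnRange(...): a start column, an end column, a column limit, and an optional reversed flag. As a minimal sketch of the general pattern before diving into the examples (keyspace, columnFamily, rowKey, startColumn, and endColumn are hypothetical placeholders, and whether you need setStart/setEnd at all depends on the query):

ByteBufferRange range = new RangeBuilder()
        .setStart(startColumn)   // first column of the slice
        .setEnd(endColumn)       // last column of the slice
        .setLimit(100)           // fetch at most 100 columns
        .setReversed(false)      // iterate in comparator order
        .build();

ColumnList<String> columns = keyspace.prepareQuery(columnFamily)
        .getKey(rowKey)
        .withColumnRange(range)
        .execute()
        .getResult();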
Example #1
Source File: EdgeMetadataSerializationV1Impl.java    From usergrid with Apache License 2.0
private RangeBuilder createRange( final SearchEdgeType search ) {
    final RangeBuilder builder = new RangeBuilder().setLimit( graphFig.getScanPageSize() );


    //we have a last, it's where we need to start seeking from
    if ( search.getLast().isPresent() ) {
        builder.setStart( search.getLast().get() );
    }

    //no last was set, but we have a prefix, set it
    else if ( search.prefix().isPresent() ) {
        builder.setStart( search.prefix().get() );
    }


    //we have a prefix, so make sure we only seek to prefix + max UTF value
    if ( search.prefix().isPresent() ) {
        builder.setEnd( search.prefix().get() + "\uffff" );
    }


    return builder;
}
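Appending "\uffff" works because U+FFFF is the largest single UTF-16 code unit: any column name that begins with the prefix (at least for names within the Basic Multilingual Plane) sorts between the prefix itself and prefix + "\uffff", so the slice is bounded to exactly the prefixed names.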
 
Example #2
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Iterator<String> listQueues() {
    final Iterator<Row<String, UUID>> rowIter = execute(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getAllRows()
                    .setRowLimit(100)
                    .withColumnRange(new RangeBuilder().setLimit(1).build()))
            .iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            while (rowIter.hasNext()) {
                Row<String, UUID> row = rowIter.next();
                if (!row.getColumns().isEmpty()) {
                    return row.getKey();
                }
            }
            return endOfData();
        }
    };
}
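The execute(...) call above is a project helper that doesn't appear on this page. A minimal sketch of what such a helper might look like, built only on Astyanax's Execution interface (the exception handling here is illustrative, not necessarily emodb's actual policy):

private <R> R execute(Execution<R> execution) {
    try {
        // Run the query and unwrap the result, converting the checked
        // ConnectionException into an unchecked exception.
        return execution.execute().getResult();
    } catch (ConnectionException e) {
        throw new RuntimeException(e);
    }
}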
 
Example #3
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
@Override
public Iterator<String> scanKeys(AstyanaxStorage storage, final ReadConsistency consistency) {
    checkNotNull(storage, "storage");
    checkNotNull(consistency, "consistency");

    final DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();

    // We just want row keys, but get at least one column so we can ignore range ghosts.
    final ByteBufferRange columnRange = new RangeBuilder().setLimit(1).build();
    final LimitCounter unlimited = LimitCounter.max();

    // Loop over all the range prefixes (2^shardsLog2 of them) and, for each, execute Cassandra queries to
    // page through the records with that prefix.
    final Iterator<ByteBufferRange> scanIter = storage.scanIterator(null);
    return touch(Iterators.concat(new AbstractIterator<Iterator<String>>() {
        @Override
        protected Iterator<String> computeNext() {
            if (scanIter.hasNext()) {
                ByteBufferRange keyRange = scanIter.next();
                return decodeKeys(rowScan(placement, keyRange, columnRange, unlimited, consistency));
            }
            return endOfData();
        }
    }));
}
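touch(...) is another helper not shown here. A plausible sketch, assuming its purpose is to force the first Cassandra query eagerly so that connection errors surface when scanKeys(...) is called rather than on the first iteration (this is an assumption about intent, not emodb's confirmed implementation):

private static <T> Iterator<T> touch(Iterator<T> iter) {
    // hasNext() triggers the first computeNext(), so any query error
    // is thrown here instead of at the first call site that iterates.
    iter.hasNext();
    return iter;
}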
 
Example #4
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
    // Finding the max using a reversed column range shouldn't have to worry about skipping tombstones since
    // we always delete smaller column values before deleting larger column values--scanning will hit the max
    // before needing to skip over tombstones.
    Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
    for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
        Rows<UUID, ByteBuffer> rows = execute(
                _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                        .getKeySlice(batch)
                        .withColumnRange(new RangeBuilder()
                                .setReversed(true)
                                .setLimit(1)
                                .build()));
        for (Row<UUID, ByteBuffer> row : rows) {
            UUID dataId = row.getKey();
            for (Column<ByteBuffer> column : row.getColumns()) {
                resultMap.put(dataId, column.getName());
            }
        }
    }
    return resultMap;
}
 
Example #5
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Iterator<ByteBuffer> scanRecords(UUID dataId, @Nullable ByteBuffer from, @Nullable final ByteBuffer to,
                                        int batchSize, int limit) {
    final Iterator<Column<ByteBuffer>> iter = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(dataId)
                    .withColumnRange(new RangeBuilder()
                            .setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
                            .setEnd(Objects.firstNonNull(to, EMPTY_BUFFER))
                            .setLimit(batchSize)
                            .build())
                    .autoPaginate(true));

    return Iterators.limit(new AbstractIterator<ByteBuffer>() {
        @Override
        protected ByteBuffer computeNext() {
            while (iter.hasNext()) {
                ByteBuffer record = iter.next().getName();
                if (!record.equals(to)) {  // To is exclusive
                    return record;
                }
            }
            return endOfData();
        }
    }, limit);
}
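executePaginated(...) (also used in Examples #9, #11, and #13) drives an auto-paginated RowQuery. A minimal sketch, assuming the standard Astyanax idiom in which each execute() on a query with autoPaginate(true) returns the next page and an empty page marks the end; it reuses the hypothetical execute helper sketched under Example #2:

private <K, C> Iterator<Column<C>> executePaginated(final RowQuery<K, C> query) {
    return Iterators.concat(new AbstractIterator<Iterator<Column<C>>>() {
        @Override
        protected Iterator<Column<C>> computeNext() {
            // Each call fetches the next page; an empty page means we're done.
            ColumnList<C> page = execute(query);
            return !page.isEmpty() ? page.iterator() : endOfData();
        }
    });
}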
 
Example #6
Source File: AstyanaxEventReaderDAO.java    From emodb with Apache License 2.0
@Override
public Iterator<String> listChannels() {
    final Iterator<Row<String, ByteBuffer>> rowIter = execute(
            _keyspace.prepareQuery(ColumnFamilies.MANIFEST, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getAllRows()
                    .setRowLimit(1000)
                    .withColumnRange(new RangeBuilder().setLimit(1).build()))
            .iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            while (rowIter.hasNext()) {
                Row<String, ByteBuffer> row = rowIter.next();
                if (!row.getColumns().isEmpty()) {
                    return row.getKey();
                }
            }
            return endOfData();
        }
    };
}
 
Example #7
Source File: AstyanaxStorageProvider.java    From emodb with Apache License 2.0
private long countRowsInColumn(Table tbl, ColumnGroup column) {
    Objects.requireNonNull(tbl, "table");

    AstyanaxTable table = (AstyanaxTable) tbl;
    AstyanaxStorage storage = table.getReadStorage();
    BlobPlacement placement = (BlobPlacement) storage.getPlacement();

    // Limit the # of columns to retrieve since we just want to count rows, but we need one column to ignore range
    // ghosts.
    CompositeSerializer colSerializer = CompositeSerializer.get();
    ByteBufferRange columnRange = new RangeBuilder()
            .setStart(getColumnPrefix(column, Composite.ComponentEquality.LESS_THAN_EQUAL), colSerializer)
            .setEnd(getColumnPrefix(column, Composite.ComponentEquality.GREATER_THAN_EQUAL), colSerializer)
            .setLimit(1)
            .build();
    LimitCounter unlimited = LimitCounter.max();

    // Range query all the shards and count the number of rows in each.
    long count = 0;
    Iterator<ByteBufferRange> scanIter = storage.scanIterator(null);
    while (scanIter.hasNext()) {
        ByteBufferRange keyRange = scanIter.next();
        Iterator<Row<ByteBuffer, Composite>> rowIter = scanInternal(placement, keyRange, columnRange, unlimited);
        while (rowIter.hasNext()) {
            if (!rowIter.next().getColumns().isEmpty()) {
                count++;
            }
        }
    }
    return count;
}
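In Astyanax's composite encoding, the ComponentEquality flags are what turn a single composite prefix into an inclusive slice: LESS_THAN_EQUAL on the start yields a bound that sorts at or before every composite beginning with that component, while GREATER_THAN_EQUAL on the end yields one that sorts after all of them, so the range covers exactly the columns in the group.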
 
Example #8
Source File: AstyanaxReader.java    From blueflood with Apache License 2.0
protected Map<Locator, ColumnList<Long>> getColumnsFromDB(List<Locator> locators, ColumnFamily<Locator, Long> CF,
                                                        Range range) {
    if (range.getStart() > range.getStop()) {
        throw new RuntimeException(String.format("Invalid rollup range: %s", range.toString()));
    }
    boolean isBatch = locators.size() != 1;

    final Map<Locator, ColumnList<Long>> columns = new HashMap<Locator, ColumnList<Long>>();
    final RangeBuilder rangeBuilder = new RangeBuilder().setStart(range.getStart()).setEnd(range.getStop());

    Timer.Context ctx = isBatch ? Instrumentation.getBatchReadTimerContext(CF.getName()) : Instrumentation.getReadTimerContext(CF.getName());
    try {
        // We don't paginate this call. So we should make sure the number of reads is tolerable.
        // TODO: Think about paginating this call.
        OperationResult<Rows<Locator, Long>> query = keyspace
                .prepareQuery(CF)
                .getKeySlice(locators)
                .withColumnRange(rangeBuilder.build())
                .execute();
        for (Row<Locator, Long> row : query.getResult()) {
            columns.put(row.getKey(), row.getColumns());
        }

    } catch (ConnectionException e) {
        if (e instanceof NotFoundException) { // TODO: Not really sure what happens when one of the keys is not found.
            Instrumentation.markNotFound(CF.getName());
        } else {
            if (isBatch) { Instrumentation.markBatchReadError(e); }
            else { Instrumentation.markReadError(e); }
        }
        log.error((isBatch ? "Batch read" : "Read") + " query failed for column family " + CF.getName() + " for locators: " + StringUtils.join(locators, ","), e);
    } finally {
        ctx.stop();
    }

    return columns;
}
 
Example #9
Source File: AstyanaxEventReaderDAO.java    From emodb with Apache License 2.0
/**
 * Reads the ordered manifest for a channel.  The read can either be weak or strong.  A weak read will use CL1
 * and may use the cached oldest slab from a previous strong call to improve performance.  A strong read will use
 * CL local_quorum and will always read the entire manifest row.  This makes a weak read significantly faster than a
 * strong read but also means the call is not guaranteed to return the entire manifest.  Because of this, at least
 * every 10 seconds a weak read for a channel is automatically promoted to a strong read.
 *
 * The vast majority of calls to this method are performed during a "peek" or "poll" operation.  Since these are
 * typically called repeatedly, a weak call provides improved performance while guaranteeing that at least every
 * 10 seconds the manifest is strongly read so no slabs are missed over time.  Calls which must guarantee
 * the full manifest should explicitly request strong consistency.
 */
private Iterator<Column<ByteBuffer>> readManifestForChannel(final String channel, final boolean weak) {
    final ByteBuffer oldestSlab = weak ? _oldestSlab.getIfPresent(channel) : null;
    final ConsistencyLevel consistency;

    RangeBuilder range = new RangeBuilder().setLimit(50);
    if (oldestSlab != null) {
        range.setStart(oldestSlab);
        consistency = ConsistencyLevel.CL_LOCAL_ONE;
    } else {
        consistency = ConsistencyLevel.CL_LOCAL_QUORUM;
    }

    final Iterator<Column<ByteBuffer>> manifestColumns = executePaginated(
            _keyspace.prepareQuery(ColumnFamilies.MANIFEST, consistency)
                    .getKey(channel)
                    .withColumnRange(range.build())
                    .autoPaginate(true));

    if (oldestSlab != null) {
        // Query was executed weakly using the cached oldest slab, so don't update the cache with an unreliable oldest value
        return manifestColumns;
    } else {
        PeekingIterator<Column<ByteBuffer>> peekingManifestColumns = Iterators.peekingIterator(manifestColumns);
        if (peekingManifestColumns.hasNext()) {
            // Cache the first slab returned from querying the full manifest column family since it is the oldest.
            cacheOldestSlabForChannel(channel, TimeUUIDSerializer.get().fromByteBuffer(peekingManifestColumns.peek().getName()));
            return peekingManifestColumns;
        } else {
            // Channel was completely empty.  Cache a TimeUUID for the current time.  This will cause future calls
            // to read at most 1 minute of tombstones until the cache expires 10 seconds later.
            cacheOldestSlabForChannel(channel, TimeUUIDs.newUUID());
            return Iterators.emptyIterator();
        }
    }
}
 
Example #10
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Nullable
@Override
public ByteBuffer findMinRecord(UUID dataId, @Nullable ByteBuffer from) {
    // Use a column range with a "start" to skip past tombstones.
    ColumnList<ByteBuffer> columns = execute(_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
            .getKey(dataId)
            .withColumnRange(new RangeBuilder()
                    .setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
                    .setLimit(1)
                    .build()));
    return !columns.isEmpty() ? columns.getColumnByIndex(0).getName() : null;
}
 
Example #11
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Map<UUID, String> loadSegments(String queue) {
    Map<UUID, String> resultMap = Maps.newHashMap();
    Iterator<Column<UUID>> iter = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(queue)
                    .withColumnRange(new RangeBuilder().setLimit(100).build())
                    .autoPaginate(true));
    while (iter.hasNext()) {
        Column<UUID> column = iter.next();
        resultMap.put(column.getName(), column.getStringValue());
    }
    return resultMap;
}
 
Example #12
Source File: AstyanaxStorageProvider.java    From emodb with Apache License 2.0
@ParameterizedTimed(type = "AstyanaxStorageProvider")
@Override
public Iterator<Map.Entry<String, StorageSummary>> scanMetadata(Table tbl, @Nullable String fromBlobIdExclusive,
                                                                final LimitCounter limit) {
    Objects.requireNonNull(tbl, "table");
    checkArgument(fromBlobIdExclusive == null || Names.isLegalBlobId(fromBlobIdExclusive), "fromBlobIdExclusive");
    checkArgument(limit.remaining() > 0, "Limit must be >0");

    final AstyanaxTable table = (AstyanaxTable) tbl;
    AstyanaxStorage storage = table.getReadStorage();
    final BlobPlacement placement = (BlobPlacement) storage.getPlacement();

    // Do a column range query on all the A and B columns.  Don't get the Z columns with the binary data.
    CompositeSerializer colSerializer = CompositeSerializer.get();
    final ByteBufferRange columnRange = new RangeBuilder()
            .setStart(getColumnPrefix(ColumnGroup.A, Composite.ComponentEquality.LESS_THAN_EQUAL), colSerializer)
            .setEnd(getColumnPrefix(ColumnGroup.B, Composite.ComponentEquality.GREATER_THAN_EQUAL), colSerializer)
            .build();

    // Loop over all the range prefixes (256 of them) and, for each, execute Cassandra queries to page through the
    // records with that prefix.
    final Iterator<ByteBufferRange> scanIter = storage.scanIterator(fromBlobIdExclusive);
    return touch(Iterators.concat(new AbstractIterator<Iterator<Map.Entry<String, StorageSummary>>>() {
        @Override
        protected Iterator<Map.Entry<String, StorageSummary>> computeNext() {
            if (scanIter.hasNext()) {
                ByteBufferRange keyRange = scanIter.next();
                return decodeMetadataRows(scanInternal(placement, keyRange, columnRange, limit), table);
            }
            return endOfData();
        }
    }));
}
 
Example #13
Source File: AstyanaxEventReaderDAO.java    From emodb with Apache License 2.0
@Override
public boolean moveIfFast(String fromChannel, String toChannel) {
    Iterator<Column<ByteBuffer>> manifestColumns = executePaginated(
            _keyspace.prepareQuery(ColumnFamilies.MANIFEST, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(fromChannel)
                    .withColumnRange(new RangeBuilder().setLimit(50).build())
                    .autoPaginate(true));

    List<ByteBuffer> closedSlabs = Lists.newArrayList();
    boolean movedAll = true;
    while (manifestColumns.hasNext()) {
        Column<ByteBuffer> manifestColumn = manifestColumns.next();
        ByteBuffer slabId = manifestColumn.getName();
        boolean open = manifestColumn.getBooleanValue();
        if (open) {
            // Can't safely re-assign open slabs to another channel since writers may still be writing.
            movedAll = false;  // All events in the open slab might be deleted, but don't check for that here.
            continue;
        }
        closedSlabs.add(slabId);
        if (closedSlabs.size() >= SLAB_MOVE_BATCH) {
            _manifestPersister.move(fromChannel, toChannel, closedSlabs, false);
            closedSlabs.clear();
        }
    }
    if (!closedSlabs.isEmpty()) {
        _manifestPersister.move(fromChannel, toChannel, closedSlabs, false);
    }

    return movedAll;
}
 
Example #14
Source File: EdgeMetadataSerializationV1Impl.java    From usergrid with Apache License 2.0
/**
 * Get the edge types from the search criteria.
 *
 * @param scope The org scope
 * @param search The edge type search info
 * @param cf The column family to execute on
 */
private Iterator<String> getEdgeTypes( final ApplicationScope scope, final SearchEdgeType search,
                                       final MultiTenantColumnFamily<ScopedRowKey<Id>, String> cf ) {
    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateSearchEdgeType( search );


    final ScopedRowKey<Id> sourceKey = new ScopedRowKey<>( scope.getApplication(), search.getNode() );


    //resume from the last if specified.  Also set the range


    final RangeBuilder rangeBuilder = createRange( search );

    RowQuery<ScopedRowKey<Id>, String> query =
            keyspace.prepareQuery( cf ).getKey( sourceKey ).autoPaginate( true )
                    .withColumnRange( rangeBuilder.build() );

    return new ColumnNameIterator<>( query, PARSER, search.getLast().isPresent() );
}
 
Example #15
Source File: EdgeSearcher.java    From usergrid with Apache License 2.0
@Override
public void buildRange( final RangeBuilder rangeBuilder ) {

    //set our start range since it was supplied to us
    if ( last.isPresent() ) {
        C sourceEdge = createColumn( last.get() );

        rangeBuilder.setStart( sourceEdge, getSerializer() );
    }else {
        setTimeScan( rangeBuilder );
    }

    setRangeOptions( rangeBuilder );


}
 
Example #16
Source File: EdgeMetadataSerializationV1Impl.java    From usergrid with Apache License 2.0
/**
 * Get the id types from the specified column family
 *
 * @param scope The organization scope to use
 * @param search The search criteria
 * @param cf The column family to search
 */
public Iterator<String> getIdTypes( final ApplicationScope scope, final SearchIdType search,
                                    final MultiTenantColumnFamily<ScopedRowKey<EdgeIdTypeKey>, String> cf ) {
    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateSearchEdgeIdType( search );


    final ScopedRowKey<EdgeIdTypeKey> sourceTypeKey =
            new ScopedRowKey<>( scope.getApplication(), new EdgeIdTypeKey( search.getNode(), search.getEdgeType() ) );


    final RangeBuilder rangeBuilder = createRange( search );


    RowQuery<ScopedRowKey<EdgeIdTypeKey>, String> query =
            keyspace.prepareQuery( cf ).getKey( sourceTypeKey ).autoPaginate( true )
                    .withColumnRange( rangeBuilder.build() );


    return new ColumnNameIterator<>( query, PARSER, search.getLast().isPresent() );
}
 
Example #17
Source File: MultiKeyColumnNameIteratorTest.java    From usergrid with Apache License 2.0
private static ColumnNameIterator<Long, Long> createIterator( final String rowKey, final boolean reversed ) {

    final ColumnParser<Long, Long> longParser = new ColumnParser<Long, Long>() {
        @Override
        public Long parseColumn( final Column<Long> column ) {
            return column.getName();
        }
    };

    final RangeBuilder range = new RangeBuilder().setLimit( 720 ).setReversed( reversed );

    final RowQuery<String, Long> query =
            keyspace.prepareQuery( COLUMN_FAMILY ).getKey( rowKey ).withColumnRange( range.build() );

    return new ColumnNameIterator<>( query, longParser, false );
}
 
Example #18
Source File: AstyanaxStorageProvider.java    From emodb with Apache License 2.0
private void purge(AstyanaxStorage storage, boolean deleteMetadata, boolean deleteData, Runnable progress) {
    BlobPlacement placement = (BlobPlacement) storage.getPlacement();
    CassandraKeyspace keyspace = placement.getKeyspace();
    ColumnFamily<ByteBuffer, Composite> cf = placement.getBlobColumnFamily();

    // Limit the query to a single column since we mainly just want the row keys (but not zero columns because
    // then we couldn't distinguish a live row from a row that has been deleted already).
    ByteBufferRange columnRange = new RangeBuilder().setLimit(1).build();

    MutationBatch mutation = keyspace.prepareMutationBatch(CONSISTENCY_STRONG);

    LimitCounter unlimited = LimitCounter.max();

    // Range query all the shards and delete all the rows we find.
    Iterator<ByteBufferRange> scanIter = storage.scanIterator(null);
    while (scanIter.hasNext()) {
        ByteBufferRange keyRange = scanIter.next();
        Iterator<Row<ByteBuffer, Composite>> rowIter = scanInternal(placement, keyRange, columnRange, unlimited);
        while (rowIter.hasNext()) {
            Row<ByteBuffer, Composite> row = rowIter.next();
            if (row.getColumns().isEmpty()) {
                continue;  // don't bother deleting range ghosts
            }

            if (deleteMetadata && deleteData) {
                mutation.withRow(cf, row.getKey()).delete();
            } else {
                if (deleteMetadata) {
                    mutation.withRow(cf, row.getKey())
                            .deleteColumn(getColumn(ColumnGroup.A, 0));
                }

                if (deleteData) {
                    mutation.withRow(cf, row.getKey())
                            .deleteColumn(getColumn(ColumnGroup.B, 1))
                            .deleteColumn(getColumn(ColumnGroup.Z, 2));
                }
            }

            if (mutation.getRowCount() >= 100) {
                progress.run();
                execute(mutation);
                mutation.discardMutations();
            }
        }
    }
    if (!mutation.isEmpty()) {
        progress.run();
        execute(mutation);
    }
}
 
Example #19
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public QueryResult readRow(String key, Integer columnCount, String startColumn, String endColumn, Boolean reversed) throws PaasException {
    invariant();
    
    try {
        // Construct the query
        RowQuery<ByteBuffer, ByteBuffer> query = keyspace
                .prepareQuery(this.columnFamily)
                .getRow(serializers.keyAsByteBuffer(key));
                
        RangeBuilder range = new RangeBuilder();
        if (columnCount != null && columnCount > 0) {
            range.setLimit(columnCount);
        }
        if (startColumn != null && !startColumn.isEmpty()) {
            range.setStart(serializers.columnAsByteBuffer(startColumn));
        }
        if (endColumn != null && !endColumn.isEmpty()) {
            range.setEnd(serializers.columnAsByteBuffer(endColumn));
        }
        if (reversed != null) {  // guard against unboxing a null Boolean
            range.setReversed(reversed);
        }
        query.withColumnRange(range.build());

        // Execute the query
        ColumnList<ByteBuffer> result = query.execute().getResult();
        
        // Convert raw data into a simple sparse tree
        SchemalessRows.Builder builder = SchemalessRows.builder();
        Map<String, String> columns = Maps.newHashMap();
        if (!result.isEmpty()) {
            for (Column<ByteBuffer> column : result) { 
                columns.put(serializers.columnAsString(column.getRawName()), serializers.valueAsString(column.getRawName(), column.getByteBufferValue()));
            }
            builder.addRow(key, columns);
        }

        QueryResult dr = new QueryResult();
        dr.setSrows(builder.build());
        return dr;
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to read row '%s' in column family '%s.%s'" , 
                              key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
 
Example #20
Source File: EdgeShardSerializationImpl.java    From usergrid with Apache License 2.0
@Override
public Iterator<Shard> getShardMetaData( final ApplicationScope scope,
                                         final Optional<Shard> start,   final DirectedEdgeMeta metaData  ) {

    ValidationUtils.validateApplicationScope( scope );
    GraphValidation.validateDirectedEdgeMeta( metaData );


    Preconditions.checkNotNull( metaData, "metadata must be present" );

    /**
     * If a start shard is present, we need to begin seeking from it
     */

    final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( graphFig.getScanPageSize() );

    if ( start.isPresent() ) {
        final Shard shard = start.get();
        GraphValidation.valiateShard( shard );
        rangeBuilder.setStart( shard.getShardIndex() );
    }


    final ScopedRowKey rowKey = ScopedRowKey.fromKey( scope.getApplication(), metaData );


    final RowQuery<ScopedRowKey<DirectedEdgeMeta>, Long> query =
            keyspace.prepareQuery( EDGE_SHARDS ).setConsistencyLevel( cassandraConfig.getReadCL() ).getKey( rowKey )
                    .autoPaginate( true ).withColumnRange( rangeBuilder.build() );


    return new ColumnNameIterator<>( query, COLUMN_PARSER, false );
}
 
Example #21
Source File: ShardsColumnIterator.java    From usergrid with Apache License 2.0
/**
 * Advance our iterator to the next row (assumes the check for row keys is elsewhere)
 */
private void startIterator() {

    if (logger.isTraceEnabled()) {
        logger.trace("Starting shards column iterator");
    }

    final RangeBuilder rangeBuilder = new RangeBuilder().setLimit( pageSize );


    // set the range into the search
    searcher.buildRange( rangeBuilder );


    if(smartShardSeekEnabled){

        // get the rows keys and their corresponding 'shardEnd' that we will seek from
        final List<SmartShard> rowKeysWithShardEnd = searcher.getRowKeysWithShardEnd();

        final boolean ascending = searcher.getOrder() == SearchByEdgeType.Order.ASCENDING;

        currentColumnIterator = new MultiRowShardColumnIterator<>( keyspace, cf,  consistencyLevel, searcher, searcher,
            searcher.getComparator(), pageSize, rowKeysWithShardEnd, ascending, searcher.getLastTimestamp() );

    }else{


        final List<ScopedRowKey<R>> rowKeys = searcher.getRowKeys();

        currentColumnIterator = new MultiRowColumnIterator<>( keyspace, cf,  consistencyLevel, searcher, searcher,
            searcher.getComparator(), rowKeys, pageSize );


    }


}
 
Example #22
Source File: EdgeSearcher.java    From usergrid with Apache License 2.0
@Override
public void buildRange(final RangeBuilder rangeBuilder, final T start, T end) {

    final boolean ascending = order == SearchByEdgeType.Order.ASCENDING;


    if ( start != null){

        C sourceEdge = createColumn( start );

        if(ascending && last.isPresent() && comparator.compare(last.get(), start) < 0){

            sourceEdge = createColumn( last.get() );

        }else if (!ascending && last.isPresent() && comparator.compare(last.get(), start) > 0){

            sourceEdge = createColumn( last.get() );
        }

        rangeBuilder.setStart( sourceEdge, getSerializer() );


    }else{

        setTimeScan( rangeBuilder );


    }

    if( end != null){

        C endEdge = createColumn( end );
        rangeBuilder.setEnd( endEdge, getSerializer() );

    }

    setRangeOptions( rangeBuilder );
}
 
Example #23
Source File: EdgeSearcher.java    From usergrid with Apache License 2.0
private void setRangeOptions(final RangeBuilder rangeBuilder){

        //if we're ascending, this is the opposite of Cassandra's natural sort order, so set the reversed flag
        final boolean reversed = order == SearchByEdgeType.Order.ASCENDING;

        rangeBuilder.setReversed( reversed );

    }
 
Example #24
Source File: ColumnSearch.java    From usergrid with Apache License 2.0
/**
 * Set up the range builder with the user-supplied start and finish
 */
void buildRange( final RangeBuilder rangeBuilder );
 
Example #25
Source File: ColumnSearch.java    From usergrid with Apache License 2.0
/**
 * Set the supplied start value and the user-supplied end value (if present)
 *
 * @param rangeBuilder The range builder to populate
 * @param start The value to use as the start of the range
 * @param end The value to use as the end of the range, if present
 */
void buildRange(final RangeBuilder rangeBuilder, final T start, T end);
 
Example #26
Source File: MultiRowColumnIterator.java    From usergrid with Apache License 2.0
public void advance() {


        if (logger.isTraceEnabled()) logger.trace( "Advancing multi row column iterator" );

        /**
         * If a start column is present, we need to begin seeking from it
         */

        final boolean skipFirstColumn = startColumn != null;



        final int selectSize = skipFirstColumn ? pageSize + 1 : pageSize;

        final RangeBuilder rangeBuilder = new RangeBuilder();


        //set the range into the search

        if ( startColumn == null ) {
            columnSearch.buildRange( rangeBuilder );
        }
        else {
            columnSearch.buildRange( rangeBuilder, startColumn, null );
        }


        rangeBuilder.setLimit( selectSize );

        if (logger.isTraceEnabled()) logger.trace( "Executing cassandra query" );

        /**
         * Get our list of slices
         */
        final RowSliceQuery<R, C> query =
            keyspace.prepareQuery( cf ).setConsistencyLevel( consistencyLevel ).getKeySlice( rowKeys )
                .withColumnRange( rangeBuilder.build() );

        final Rows<R, C> result;
        try {
            result = query.execute().getResult();
        }
        catch ( ConnectionException e ) {
            throw new RuntimeException( "Unable to connect to casandra", e );
        }


        //now aggregate them together

        //this is an optimization: if only one row has data, it's faster to return that row's
        //columns directly than to perform a merge


        final List<T> mergedResults;

        if ( containsSingleRowOnly( result ) ) {
            mergedResults = singleRowResult( result );
        }
        else {
            mergedResults = mergeResults( result, selectSize );
        }

        //we've parsed everything; truncate to the first pageSize, since it's all we can ensure
        //is correct without another trip back to cassandra

        //discard our first element (maybe)

        final int size = mergedResults.size();

        moreToReturn = size == selectSize;

        //we have a first column to check
        if( size > 0) {

            final T firstResult = mergedResults.get( 0 );

            //The search has either told us to skip the first element, or it matches our last, so we disregard it
            if(columnSearch.skipFirst( firstResult ) || (skipFirstColumn && comparator.compare( startColumn, firstResult ) == 0)){
                mergedResults.remove( 0 );
            }

        }


        if(moreToReturn && mergedResults.size() > 0){
            startColumn = mergedResults.get( mergedResults.size()  - 1 );
        }


        currentColumnIterator = mergedResults.iterator();

        if (logger.isTraceEnabled()) logger.trace( "Finished parsing {} rows for results", rowKeys.size() );
    }
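Two details in this method do the heavy lifting: when resuming from a cursor the query asks for pageSize + 1 columns so the duplicated cursor column can be dropped without shortening the page, and a full result (size == selectSize) is the signal that more columns may remain.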
 
Example #27
Source File: EdgeSearcher.java    From usergrid with Apache License 2.0
/**
 * Set the time scan into the range builder
 * @param rangeBuilder
 */
protected abstract void setTimeScan(final RangeBuilder rangeBuilder);