com.netflix.astyanax.model.Rows Java Examples

The following examples show how to use com.netflix.astyanax.model.Rows, drawn from several open-source projects. Each example notes its project of origin and license.
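For orientation, the pattern common to most of these examples is: build a query against a ColumnFamily, execute it, and iterate the resulting Rows. The sketch below is illustrative only; the keyspace, the "example" column family, and the method itself are hypothetical, following the Astyanax query API used throughout the examples.

// Minimal sketch (assumptions: an initialized Keyspace and a column family
// with String row keys and String column names).
void printAllColumns(Keyspace keyspace) throws ConnectionException {
    ColumnFamily<String, String> CF_EXAMPLE =
            ColumnFamily.newColumnFamily("example", StringSerializer.get(), StringSerializer.get());

    // Rows<K, C> is the Iterable result of a multi-row query.
    Rows<String, String> rows = keyspace.prepareQuery(CF_EXAMPLE)
            .getKeySlice("key1", "key2")
            .execute()
            .getResult();

    for (Row<String, String> row : rows) {
        for (Column<String> column : row.getColumns()) {
            System.out.println(row.getKey() + " -> " + column.getName() + " = " + column.getStringValue());
        }
    }
}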
Example #1
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
    // Finding the max using a reversed column range shouldn't have to worry about skipping tombstones since
    // we always delete smaller column values before deleting larger column values--scanning will hit the max
    // before needing to skip over tombstones.
    Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
    for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
        Rows<UUID, ByteBuffer> rows = execute(
                _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                        .getKeySlice(batch)
                        .withColumnRange(new RangeBuilder()
                                .setReversed(true)
                                .setLimit(1)
                                .build()));
        for (Row<UUID, ByteBuffer> row : rows) {
            UUID dataId = row.getKey();
            for (Column<ByteBuffer> column : row.getColumns()) {
                resultMap.put(dataId, column.getName());
            }
        }
    }
    return resultMap;
}
 
Example #2
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
/**
 * Queries for rows given an enumerated list of Cassandra row keys.
 */
private Iterator<Record> rowQuery(DeltaPlacement placement,
                                  List<Map.Entry<ByteBuffer, Key>> keys,
                                  ReadConsistency consistency) {
    // Build the list of row IDs to query for.
    List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction());

    // Query for Delta & Compaction info, just the first 50 columns for now.
    final Rows<ByteBuffer, DeltaKey> rows = execute(placement.getKeyspace()
                    .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
                    .getKeySlice(rowIds)
                    .withColumnRange(_maxColumnsRange),
            "query %d keys from placement %s", rowIds.size(), placement.getName());

    // Track metrics
    _randomReadMeter.mark(rowIds.size());

    // Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once.
    return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency);
}
 
Example #3
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
/**
 * Decodes rows returned by querying for a specific set of rows.
 */
private Iterator<Record> decodeRows(List<Map.Entry<ByteBuffer, Key>> keys, final Rows<ByteBuffer, DeltaKey> rows,
                                    final int largeRowThreshold, final ReadConsistency consistency) {
    // Avoid pinning multiple decoded rows into memory at once.
    return Iterators.transform(keys.iterator(), new Function<Map.Entry<ByteBuffer, Key>, Record>() {
        @Override
        public Record apply(Map.Entry<ByteBuffer, Key> entry) {
            Row<ByteBuffer, DeltaKey> row = rows.getRow(entry.getKey());
            if (row == null) {
                return emptyRecord(entry.getValue());
            }
            // Convert the results into a Record object, lazily fetching the rest of the columns as necessary.
            return newRecord(entry.getValue(), row.getRawKey(), row.getColumns(), largeRowThreshold, consistency, null);
        }
    });
}
 
Example #4
Source File: MultiRowColumnIterator.java    From usergrid with Apache License 2.0
/**
 * Return true if we have < 2 rows with columns, false otherwise
 */
private boolean containsSingleRowOnly( final Rows<R, C> result ) {

    int count = 0;

    for ( R key : result.getKeys() ) {
        if ( result.getRow( key ).getColumns().size() > 0 ) {
            count++;

            //we have more than 1 row with values, return them
            if ( count > 1 ) {
                return false;
            }
        }
    }

    return true;
}
 
Example #5
Source File: CassandraArchiveRepository.java    From Nicobar with Apache License 2.0
/**
 * Get all of the rows in the table. Attempts to reduce the load on Cassandra by splitting the query into smaller sub-queries.
 * @param columns which columns to select
 * @return result rows
 */
protected Iterable<Row<String, String>> getRows(EnumSet<?> columns) throws Exception {
    int shardCount = config.getShardCount();

    List<Future<Rows<String, String>>> futures = new ArrayList<Future<Rows<String, String>>>();
    for (int i = 0; i < shardCount; i++) {
        futures.add(cassandra.selectAsync(generateSelectByShardCql(columns, i)));
    }

    List<Row<String, String>> rows = new LinkedList<Row<String, String>>();
    for (Future<Rows<String, String>> f: futures) {
        Rows<String, String> shardRows = f.get();
        Iterables.addAll(rows, shardRows);
    }

    return rows;
}
 
Example #6
Source File: CassandraArchiveRepositoryTest.java    From Nicobar with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testGetRows() throws Exception {
    EnumSet<Columns> columns = EnumSet.of(Columns.module_id, Columns.module_name);
    Rows<String, String> mockRows = mock(Rows.class);
    Row<String, String> row1 = mock(Row.class);
    Row<String, String> row2 = mock(Row.class);
    List<Row<String, String>> rowList = Arrays.asList(row1, row2);

    when(mockRows.iterator()).thenReturn(rowList.iterator());

    FutureTask<Rows<String, String>> future = new FutureTask<Rows<String, String>>(new Runnable() {
        @Override
        public void run() {
        }
    }, mockRows);
    ExecutorService executor = Executors.newFixedThreadPool(1);
    executor.execute(future);
    when(gateway.selectAsync(anyString())).thenReturn(future);

    repository.getRows(columns);
    List<String> selectList = new ArrayList<String>();
    for (int shardNum = 0; shardNum < config.getShardCount(); shardNum++) {
        selectList.add(repository.generateSelectByShardCql(columns, shardNum));
    }

    InOrder inOrder = inOrder(gateway);
    for (int shardNum = 0; shardNum < config.getShardCount(); shardNum++) {
        inOrder.verify(gateway).selectAsync(selectList.get(shardNum));
    }
}
 
Example #7
Source File: QueryUtils.java    From staash with Apache License 2.0
public static String formatQueryResult(CqlStatementResult rs, String cfname) {
    JsonObject response = new JsonObject();
    ColumnFamily<String, String> cf = ColumnFamily
            .newColumnFamily(cfname, StringSerializer.get(),
                    StringSerializer.get());
    Rows<String, String> rows = rs.getRows(cf);
    int rcount = 1;
    for (com.netflix.astyanax.model.Row<String, String> row : rows) {
        ColumnList<String> columns = row.getColumns();
        Collection<String> colnames = columns.getColumnNames();
        if (colnames.contains("key") && colnames.contains("column1")) {
            // Key/timestamp style row: use the date column as the JSON key and the
            // "value" column as the payload. Guard against a missing date to avoid a
            // NullPointerException; toGMTString() is deprecated but retained to
            // preserve the original output format.
            java.util.Date date = columns.getDateValue("column1", null);
            String colStr = (date != null) ? date.toGMTString() : "";
            String rowStr = columns.getStringValue("value", null);
            response.putString(colStr, rowStr);
        } else {
            JsonObject rowObj = new JsonObject();
            for (String colName : colnames) {
                rowObj.putString(colName, columns.getStringValue(colName, null));
            }
            response.putObject("" + rcount++, rowObj);
        }
    }
    return response.toString();
}
 
Example #8
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public QueryResult listRows(String cursor, Integer rowLimit, Integer columnLimit) throws PaasException {
    try {
        invariant();
        
        // Execute the query
        Partitioner partitioner = keyspace.getPartitioner();
        Rows<ByteBuffer, ByteBuffer> result = keyspace
            .prepareQuery(columnFamily)
            .getKeyRange(null, null, cursor != null ? cursor : partitioner.getMinToken(), partitioner.getMaxToken(), rowLimit)
            .execute()
            .getResult();
        
        // Convert raw data into a simple sparse tree
        SchemalessRows.Builder builder = SchemalessRows.builder();
        for (Row<ByteBuffer, ByteBuffer> row : result) { 
            Map<String, String> columns = Maps.newHashMap();
            for (Column<ByteBuffer> column : row.getColumns()) {
                columns.put(serializers.columnAsString(column.getRawName()), serializers.valueAsString(column.getRawName(), column.getByteBufferValue()));
            }
            builder.addRow(serializers.keyAsString(row.getKey()), columns);
        }
        
        QueryResult dr = new QueryResult();
        dr.setSrows(builder.build());
        
        if (!result.isEmpty()) {
            dr.setCursor(partitioner.getTokenForKey(Iterables.getLast(result).getKey()));
        }
        return dr;
    } catch (ConnectionException e) {
        // Log the failure and fall through to return null.
        e.printStackTrace();
    }
    return null;
}
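A note on usage: listRows returns the partitioner token of the last row as the cursor, so a caller can page through the whole table by feeding each result's cursor back in. The loop below is a sketch under that assumption; the resource variable and the QueryResult getters are hypothetical mirrors of the setters used above.

// Hypothetical paging driver for the listRows method above.
String cursor = null;
QueryResult page;
do {
    page = resource.listRows(cursor, 100, 100);   // 100 rows x 100 columns per page
    if (page == null) {
        break;                                    // listRows returns null on connection errors
    }
    // ... consume page.getSrows() ...
    cursor = page.getCursor();                    // assumed getter matching setCursor above
} while (cursor != null);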
 
Example #9
Source File: HystrixCassandraGetRowsByQuery.java    From Nicobar with Apache License 2.0
@Override
protected Rows<RowKeyType, String> run() throws Exception {
    CqlResult<RowKeyType, String> cqlresult = keyspace.prepareQuery(columnFamily).withCql(cql).execute()
            .getResult();
    Rows<RowKeyType, String> result = cqlresult.getRows();
    return result;
}
 
Example #10
Source File: HystrixCassandraGetRowsByKeys.java    From Nicobar with Apache License 2.0
@Override
protected Rows<RowKeyType, String> run() throws Exception {
    RowSliceQuery<RowKeyType, String> rowQuery = keyspace.prepareQuery(columnFamily).getKeySlice(rowKeys);

    /* apply column slice if we have one */
    if (columns != null) {
        rowQuery = rowQuery.withColumnSlice(columns);
    }
    Rows<RowKeyType, String> result = rowQuery.execute().getResult();
    return result;
}
 
Example #11
Source File: CassandraStorage.java    From greycat with Apache License 2.0
@Override
public void get(Buffer keys, Callback<Buffer> callback) {
    try {
        BufferIterator it = keys.iterator();
        final List<byte[]> all_keys = new ArrayList<byte[]>();
        final Map<byte[], byte[]> results = new HashMap<byte[], byte[]>();
        while (it.hasNext()) {
            Buffer keyView = it.next();
            if (keyView != null) {
                all_keys.add(keyView.data());
            }
        }
        // Note: the results map is keyed by byte[], which compares by identity, not
        // content. This works only if Astyanax returns the same key instances that
        // were passed to getKeySlice(); a content-keyed map would be more robust.
        Rows<byte[], Integer> rows = keyspace.prepareQuery(MWG).getKeySlice(all_keys).execute().getResult();
        for (int i = 0; i < rows.size(); i++) {
            Row<byte[], Integer> row = rows.getRowByIndex(i);
            if (row != null) {
                Column<Integer> col = row.getColumns().getColumnByName(0);
                if (col != null) {
                    results.put(row.getKey(), col.getByteArrayValue());
                }
            }
        }
        Buffer result = graph.newBuffer();
        for (int i = 0; i < all_keys.size(); i++) {
            if (i != 0) {
                result.write(Constants.BUFFER_SEP);
            }
            byte[] resolved = results.get(all_keys.get(i));
            if (resolved != null) {
                result.writeAll(resolved);
            }
        }
        callback.on(result);
    } catch (Exception e) {
        // Note: on failure the callback is never invoked, so callers waiting on it will stall.
        e.printStackTrace();
    }

}
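One caveat in this example: the results map is keyed by raw byte[] values, which compare by identity rather than content, so the lookups near the end only succeed if Astyanax hands back the very same key instances that were passed in. A content-keyed variant is sketched below; it is an assumption, not the project's code, wrapping each key in a ByteBuffer (whose equals()/hashCode() compare contents). MWG is the column family from the original example.

// Sketch: content-keyed lookup for the pattern above; the method itself is hypothetical.
Map<ByteBuffer, byte[]> fetchByContent(Keyspace keyspace, List<byte[]> allKeys) throws ConnectionException {
    Map<ByteBuffer, byte[]> results = new HashMap<ByteBuffer, byte[]>();
    Rows<byte[], Integer> rows = keyspace.prepareQuery(MWG).getKeySlice(allKeys).execute().getResult();
    for (int i = 0; i < rows.size(); i++) {
        Row<byte[], Integer> row = rows.getRowByIndex(i);
        if (row == null) {
            continue;
        }
        Column<Integer> col = row.getColumns().getColumnByName(0);
        if (col != null) {
            // ByteBuffer.wrap() compares by buffer contents, unlike raw byte[].
            results.put(ByteBuffer.wrap(row.getKey()), col.getByteArrayValue());
        }
    }
    return results;
}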
 
Example #12
Source File: NodeSerializationImpl.java    From usergrid with Apache License 2.0
@Override
public Map<Id, Long> getMaxVersions( final ApplicationScope scope, final Collection<? extends Edge> edges ) {
    ValidationUtils.validateApplicationScope( scope );
    Preconditions.checkNotNull( edges, "edges cannot be null" );


    final ColumnFamilyQuery<ScopedRowKey<Id>, Boolean> query =
            keyspace.prepareQuery( GRAPH_DELETE ).setConsistencyLevel( fig.getReadCL() );

    final List<ScopedRowKey<Id>> keys = new ArrayList<>( edges.size() );

    //worst case all are marked
    final Map<Id, Long> versions = new HashMap<>( edges.size() );

    final Id scopeId = scope.getApplication();

    for ( final Edge edge : edges ) {
        keys.add( ScopedRowKey.fromKey( scopeId, edge.getSourceNode() ) );
        keys.add( ScopedRowKey.fromKey( scopeId, edge.getTargetNode() ) );
    }


    final Rows<ScopedRowKey<Id>, Boolean> results;
    try {
        results = query.getRowSlice( keys ).withColumnSlice( Collections.singletonList( COLUMN_NAME )).execute()
                .getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to connect to casandra", e );
    }

    for ( Row<ScopedRowKey<Id>, Boolean> row : results ) {
        Column<Boolean> column = row.getColumns().getColumnByName( COLUMN_NAME );

        if ( column != null ) {
            versions.put( row.getKey().getKey(), column.getLongValue() );
        }
    }


    return versions;
}
 
Example #13
Source File: CassandraGatewayImpl.java    From Nicobar with Apache License 2.0
@Override
public Future<Rows<String, String>> selectAsync(String cql) {
    return new HystrixCassandraGetRowsByQuery<String>(keyspace, columnFamily, String.class, cql).queue();
}
 
Example #14
Source File: CassandraGatewayImpl.java    From Nicobar with Apache License 2.0
@Override
public Rows<String, String> select(String cql) {
    return new HystrixCassandraGetRowsByQuery<String>(keyspace, columnFamily, String.class, cql).execute();
}
 
Example #15
Source File: CassandraGatewayImpl.java    From Nicobar with Apache License 2.0
@Override
public Rows<String, String> getRows(String... rowKeys) {
    return new HystrixCassandraGetRowsByKeys<String>(keyspace, columnFamily, rowKeys).execute();
}
 
Example #16
Source File: CassandraArchiveRepository.java    From Nicobar with Apache License 2.0
/**
 * Get all of the {@link ScriptArchive}s for the given set of moduleIds. Will perform the operation in batches
 * as specified by {@link CassandraArchiveRepositoryConfig#getArchiveFetchBatchSize()} and outputs the jar files in
 * the path specified by {@link CassandraArchiveRepositoryConfig#getArchiveOutputDirectory()}.
 *
 * @param moduleIds keys to search for
 * @return set of ScriptArchives retrieved from the database
 */
@Override
public Set<ScriptArchive> getScriptArchives(Set<ModuleId> moduleIds) throws IOException {
    Set<ScriptArchive> archives = new LinkedHashSet<ScriptArchive>(moduleIds.size()*2);
    Path archiveOutputDir = getConfig().getArchiveOutputDirectory();
    List<ModuleId> moduleIdList = new LinkedList<ModuleId>(moduleIds);
    int batchSize = getConfig().getArchiveFetchBatchSize();
    int start = 0;
    try {
        while (start < moduleIdList.size()) {
            int end = Math.min(moduleIdList.size(), start + batchSize);
            List<ModuleId> batchModuleIds = moduleIdList.subList(start, end);
            List<String> rowKeys = new ArrayList<String>(batchModuleIds.size());
            for (ModuleId batchModuleId:batchModuleIds) {
                rowKeys.add(batchModuleId.toString());
            }

            Rows<String, String> rows = cassandra.getRows(rowKeys.toArray(new String[0]));
            for (Row<String, String> row : rows) {
                String moduleId = row.getKey();
                ColumnList<String> columns = row.getColumns();
                Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
                Column<String> hashColumn = columns.getColumnByName(Columns.archive_content_hash.name());
                Column<String> contentColumn = columns.getColumnByName(Columns.archive_content.name());
                if (lastUpdateColumn == null || hashColumn == null || contentColumn == null) {
                    continue;
                }
                ScriptModuleSpec moduleSpec = getModuleSpec(columns);
                long lastUpdateTime = lastUpdateColumn.getLongValue();
                byte[] hash = hashColumn.getByteArrayValue();
                byte[] content = contentColumn.getByteArrayValue();

                // verify the hash
                if (hash != null && hash.length > 0 && !verifyHash(hash, content)) {
                    logger.warn("Content hash validation failed for moduleId {}. size: {}", moduleId, content.length);
                    continue;
                }
                String fileName = new StringBuilder().append(moduleId).append("-").append(lastUpdateTime).append(".jar").toString();
                Path jarFile = archiveOutputDir.resolve(fileName);
                Files.write(jarFile, content);
                JarScriptArchive scriptArchive = new JarScriptArchive.Builder(jarFile)
                    .setModuleSpec(moduleSpec)
                    .setCreateTime(lastUpdateTime)
                    .build();
                archives.add(scriptArchive);
            }
            start = end;
        }
    } catch (Exception e) {
        throw new IOException(e);
    }
    return archives;
}
 
Example #17
Source File: MultiRowColumnIterator.java    From usergrid with Apache License 2.0
/**
 * Multiple rows are present; merge them into a single result set.
 * @param result the rows returned by the multi-row query
 * @param maxSize the maximum number of merged results to retain
 * @return the merged, ordered list of parsed column values
 */
private List<T> mergeResults( final Rows<R, C> result, final int maxSize ) {

    if (logger.isTraceEnabled()) logger.trace( "Multiple rows have columns.  Merging" );


    final List<T> mergedResults = new ArrayList<>(maxSize);

    for ( final R key : result.getKeys() ) {
        final ColumnList<C> columns = result.getRow( key ).getColumns();


        for (final Column<C> column :columns  ) {

            final T returnedValue = columnParser.parseColumn( column );

            //Use an O(log n) search, same as a tree, but with fast access to indexes for later operations
            int searchIndex = Collections.binarySearch( mergedResults, returnedValue, comparator );

            /**
             * DO NOT remove this section of code. If you're seeing inconsistent results during shard transition,
             * you'll
             * need to enable this
             */
            //
            //                if ( previous != null && comparator.compare( previous, returnedValue ) == 0 ) {
            //                    throw new RuntimeException( String.format(
            //                            "Cassandra returned 2 unique columns,
            // but your comparator marked them as equal.  This " +
            //                                    "indicates a bug in your comparator.  Previous value was %s and
            // current value is " +
            //                                    "%s",
            //                            previous, returnedValue ) );
            //                }
            //
            //                previous = returnedValue;

            //we've already seen it, no-op
            if(searchIndex > -1){
                continue;
            }

            final int insertIndex = (searchIndex+1)*-1;

            //it's at the end of the list, don't bother inserting just to remove it
            if(insertIndex >= maxSize){
                continue;
            }

            if (logger.isTraceEnabled()) logger.trace( "Adding value {} to merged set at index {}", returnedValue, insertIndex );

            mergedResults.add( insertIndex, returnedValue );


            //prune the mergedResults
            while ( mergedResults.size() > maxSize ) {

                if (logger.isTraceEnabled()) logger.trace( "Trimming results to size {}", maxSize );

                //just remove from our tail until the size falls to the correct value
                mergedResults.remove(mergedResults.size()-1);
            }
        }

        if (logger.isTraceEnabled()) logger.trace( "Candidate result set size is {}", mergedResults.size() );

    }
    return mergedResults;
}
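The insertion-index arithmetic above follows directly from the Collections.binarySearch contract: for a key that is absent, it returns -(insertionPoint) - 1, so the insertion point is recovered as (searchIndex + 1) * -1. A small standalone illustration:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BinarySearchDemo {
    public static void main(String[] args) {
        List<Integer> sorted = Arrays.asList(10, 20, 30);

        // Present: binarySearch returns the element's index.
        System.out.println(Collections.binarySearch(sorted, 20)); // 1

        // Absent: returns -(insertionPoint) - 1. For 25 the insertion point is 2,
        // so binarySearch returns -3 and (searchIndex + 1) * -1 recovers 2.
        int searchIndex = Collections.binarySearch(sorted, 25);   // -3
        int insertIndex = (searchIndex + 1) * -1;                 // 2
        System.out.println(insertIndex);
    }
}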
 
Example #18
Source File: AstyanaxStorageProvider.java    From emodb with Apache License 2.0
/**
 * Queries for rows within the specified range, exclusive on start and inclusive on end.
 */
private Iterator<Row<ByteBuffer, Composite>> scanInternal(final BlobPlacement placement, final ByteBufferRange keyRange,
                                                          final ByteBufferRange columnRange, final LimitCounter limit) {
    return Iterators.concat(new AbstractIterator<Iterator<Row<ByteBuffer, Composite>>>() {
        private ByteBuffer _rangeStart = keyRange.getStart();
        private final ByteBuffer _rangeEnd = keyRange.getEnd();
        private int _minimumLimit = 1;
        private boolean _done;

        @Override
        protected Iterator<Row<ByteBuffer, Composite>> computeNext() {
            // Note: if Cassandra is asked to perform a token range query where start >= end it will wrap
            // around which is absolutely *not* what we want since it could return data for another table.
            if (_done || BufferUtils.compareUnsigned(_rangeStart, _rangeEnd) >= 0) {
                return endOfData();
            }

            Timer.Context timer = _scanBatchTimer.time();
            try {
                int batchSize = (int) Math.min(Math.max(limit.remaining(), _minimumLimit), MAX_SCAN_METADATA_BATCH_SIZE);
                // Increase the minimum limit a bit each time around so if we start encountering lots of range
                // ghosts we eventually scan through them at a reasonable rate.
                _minimumLimit = Math.min(_minimumLimit + 3, MAX_SCAN_METADATA_BATCH_SIZE);

                // Pass token strings to get exclusive start behavior, to support 'fromBlobIdExclusive'.
                Rows<ByteBuffer, Composite> rows = execute(placement.getKeyspace()
                        .prepareQuery(placement.getBlobColumnFamily(), _readConsistency)
                        .getKeyRange(null, null, toTokenString(_rangeStart), toTokenString(_rangeEnd), batchSize)
                        .withColumnRange(columnRange));

                if (rows.size() >= batchSize) {
                    // Save the last row key so we can use it as the start (exclusive) if we must query to get more data.
                    _rangeStart = rows.getRowByIndex(rows.size() - 1).getKey();
                } else {
                    // If we got fewer rows than we asked for, another query won't find more rows.
                    _done = true;
                }

                // Track metrics
                _scanReadMeter.mark(rows.size());

                // Return the rows.  Filter out range ghosts (deleted rows with no columns)
                final Iterator<Row<ByteBuffer, Composite>> rowIter = rows.iterator();
                return new AbstractIterator<Row<ByteBuffer, Composite>>() {
                    @Override
                    protected Row<ByteBuffer, Composite> computeNext() {
                        while (rowIter.hasNext()) {
                            Row<ByteBuffer, Composite> row = rowIter.next();
                            if (!row.getColumns().isEmpty()) {
                                return row;
                            }
                        }
                        return endOfData();
                    }
                };
            } finally {
                timer.stop();
            }
        }
    });
}
 
Example #19
Source File: MultiRowColumnIterator.java    From usergrid with Apache License 2.0
/**
 * A single row is present; parse only that row.
 * @param result the rows returned by the query
 * @return the parsed column values from the single populated row
 */
private List<T> singleRowResult( final Rows<R, C> result ) {

    if (logger.isTraceEnabled()) logger.trace( "Only a single row has columns.  Parsing directly" );

    for ( R key : result.getKeys() ) {
        final ColumnList<C> columnList = result.getRow( key ).getColumns();

        final int size = columnList.size();

        if ( size > 0 ) {

            final List<T> results = new ArrayList<>(size);

            for(Column<C> column: columnList){
                results.add(columnParser.parseColumn( column ));
            }

            return results;


        }
    }

    //we didn't have any results, just return nothing
    return Collections.<T>emptyList();
}
 
Example #20
Source File: MultiRowShardColumnIterator.java    From usergrid with Apache License 2.0
/**
 * Process the result set and filter any duplicates that may have already been seen in previous shards.  During
 * a shard transition, the same columns can appear in multiple shards (rows).  This also allows for filtering
 * the startColumn (the seek starting point) when paging a row in Cassandra.
 *
 * @param result the rows returned by the query
 * @param maxSize the maximum number of results to return
 * @return the merged results with previously seen duplicates removed
 */
private List<T> processResults(final Rows<R, C> result, final int maxSize ) {

    final List<T> mergedResults = new ArrayList<>(maxSize);

    for ( final R key : result.getKeys() ) {
        final ColumnList<C> columns = result.getRow( key ).getColumns();


        for (final Column<C> column :columns  ) {

            final T returnedValue = columnParser.parseColumn( column );

            // use an O(log n) search, same as a tree, but with fast access to indexes for later operations
            int searchIndex = Collections.binarySearch( resultsTracking, returnedValue, comparator );


            //we've already seen the column, filter it out as we might be in a shard transition or our start column
            if(searchIndex > -1){
                if(logger.isTraceEnabled()){
                    logger.trace("skipping column as it was already retrieved before");
                }
                skipSize++;
                continue;
            }


            resultsTracking.add(returnedValue);
            mergedResults.add(returnedValue );


        }

        if (logger.isTraceEnabled()) logger.trace( "Candidate result set size is {}", mergedResults.size() );

    }
    return mergedResults;
}
 
Example #21
Source File: CassandraGateway.java    From Nicobar with Apache License 2.0
/**
 * Gets all columns for all the listed row keys.
 * @param rowKeys a list of row keys.
 * @return list of rows, possibly null.
 */
public Rows<String, String> getRows(String... rowKeys);
 
Example #22
Source File: CassandraGateway.java    From Nicobar with Apache License 2.0
/**
 * Performs a CQL query and returns result.
 *
 * @param cql the CQL query string.
 * @return resulting row set, could be null.
 */
public Rows<String, String> select(String cql);
 
Example #23
Source File: CassandraGateway.java    From Nicobar with Apache License 2.0
/**
 * Performs a CQL query asynchronously.
 *
 * @param cql the CQL query string.
 * @return Future containing result row set.
 */
public Future<Rows<String, String>> selectAsync(String cql);
 
Example #24
Source File: MultiRowColumnIterator.java    From usergrid with Apache License 2.0
    public void advance() {

        if (logger.isTraceEnabled()) logger.trace( "Advancing multi row column iterator" );

        /**
         * If the start column is present, we need to begin seeking from that column
         */

        final boolean skipFirstColumn = startColumn != null;

        final int selectSize = skipFirstColumn ? pageSize + 1 : pageSize;

        final RangeBuilder rangeBuilder = new RangeBuilder();


        //set the range into the search

        if ( startColumn == null ) {
            columnSearch.buildRange( rangeBuilder );
        }
        else {
            columnSearch.buildRange( rangeBuilder, startColumn, null );
        }


        rangeBuilder.setLimit( selectSize );

        if (logger.isTraceEnabled()) logger.trace( "Executing cassandra query" );

        /**
         * Get our list of slices
         */
        final RowSliceQuery<R, C> query =
            keyspace.prepareQuery( cf ).setConsistencyLevel( consistencyLevel ).getKeySlice( rowKeys )
                .withColumnRange( rangeBuilder.build() );

        final Rows<R, C> result;
        try {
            result = query.execute().getResult();
        }
        catch ( ConnectionException e ) {
            throw new RuntimeException( "Unable to connect to casandra", e );
        }


        //now aggregate them together

        //this is an optimization.  It's faster to see if we only have values for one row,
        // then return the iterator of those columns than
        //do a merge if only one row has data.


        final List<T> mergedResults;

        if ( containsSingleRowOnly( result ) ) {
            mergedResults = singleRowResult( result );
        }
        else {
            mergedResults = mergeResults( result, selectSize );
        }

        //we've parsed everything; truncate to the first pageSize, it's all we can ensure is correct without another
        //trip back to cassandra

        //discard our first element (maybe)

        final int size = mergedResults.size();

        moreToReturn = size == selectSize;

        //we have a first column to check
        if( size > 0) {

            final T firstResult = mergedResults.get( 0 );

            //The search has either told us to skip the first element, or it matches our last, therefore we disregard it
            if(columnSearch.skipFirst( firstResult ) || (skipFirstColumn && comparator.compare( startColumn, firstResult ) == 0)){
                mergedResults.remove( 0 );
            }

        }


        if(moreToReturn && mergedResults.size() > 0){
            startColumn = mergedResults.get( mergedResults.size()  - 1 );
        }


        currentColumnIterator = mergedResults.iterator();

        if (logger.isTraceEnabled()) logger.trace( "Finished parsing {} rows for results", rowKeys.size() );
    }
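For context, advance() is written to be driven by an Iterator facade: moreToReturn signals whether another page might exist and currentColumnIterator holds the current page. A minimal sketch of how hasNext()/next() could delegate to it (an assumption about the surrounding class, not the project's exact code):

    // Sketch only: field names mirror those used in advance() above.
    @Override
    public boolean hasNext() {
        // Fetch the next page when the current one is exhausted and more data may remain.
        if ((currentColumnIterator == null || !currentColumnIterator.hasNext()) && moreToReturn) {
            advance();
        }
        return currentColumnIterator != null && currentColumnIterator.hasNext();
    }

    @Override
    public T next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        return currentColumnIterator.next();
    }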