com.netflix.astyanax.model.Column Java Examples

The following examples show how to use com.netflix.astyanax.model.Column. Each example is drawn from an open source project; the source file, project, and license are noted above each snippet.
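Before diving into the project examples, here is a minimal orientation sketch of the Column accessors the snippets below lean on most: getName(), typed value getters such as getStringValue() and getLongValue(), and getTimestamp(). This fragment is hypothetical and not taken from any of the projects below; the configured keyspace and the ColumnFamily<String, String> named CF_USERS are assumptions.

// Minimal sketch, assuming a configured Astyanax Keyspace and a ColumnFamily<String, String> CF_USERS.
ColumnList<String> columns = keyspace.prepareQuery(CF_USERS)
        .getKey("some-row-key")
        .execute()
        .getResult();

for (Column<String> column : columns) {
    String name = column.getName();               // column name, typed by the column family's column serializer
    String value = column.getStringValue();       // typed value accessors: getLongValue(), getByteBufferValue(), etc.
    long writeTimeMicros = column.getTimestamp(); // write timestamp, in microseconds
    System.out.printf("%s=%s (written at %d)%n", name, value, writeTimeMicros);
}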
Example #1
Source File: AstyanaxStorageProvider.java    From emodb with Apache License 2.0
private static StorageSummary toStorageSummary(ColumnList<Composite> columns) {
    if (columns.size() == 0) {
        return null;
    }

    // Read the summary column with the attributes, length etc.
    Column<Composite> summaryColumn = columns.getColumnByIndex(0);
    if (summaryColumn == null || !matches(summaryColumn.getName(), ColumnGroup.A, 0)) {
        return null;
    }
    StorageSummary summary = JsonHelper.fromJson(summaryColumn.getStringValue(), StorageSummary.class);

    // Check that all the chunks are available.  Some may still be in the process of being written or replicated.
    if (columns.size() < 1 + summary.getChunkCount()) {
        return null;
    }
    for (int chunkId = 0; chunkId < summary.getChunkCount(); chunkId++) {
        Column<Composite> presence = columns.getColumnByIndex(chunkId + 1);
        if (presence == null ||
                !matches(presence.getName(), ColumnGroup.B, chunkId) ||
                presence.getTimestamp() != summary.getTimestamp()) {
            return null;
        }
    }
    return summary;
}
 
Example #2
Source File: CassandraArchiveRepository.java    From Nicobar with Apache License 2.0
/**
 * Get a summary of all archives in this Repository
 * @return List of summaries
 */
@Override
public List<ArchiveSummary> getArchiveSummaries() throws IOException {
    List<ArchiveSummary> summaries = new LinkedList<ArchiveSummary>();
    Iterable<Row<String, String>> rows;
    try {
        rows = getRows((EnumSet<?>)EnumSet.of(Columns.module_id, Columns.last_update, Columns.module_spec));
    } catch (Exception e) {
        throw new IOException(e);
    }

    for (Row<String, String> row : rows) {
        String moduleId = row.getKey();
        ColumnList<String> columns = row.getColumns();
        Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
        long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : 0;
        ScriptModuleSpec moduleSpec = getModuleSpec(columns);
        ArchiveSummary summary = new ArchiveSummary(ModuleId.fromString(moduleId), moduleSpec, updateTime, null);
        summaries.add(summary);
    }
    return summaries;
}
 
Example #3
Source File: CassandraArchiveRepository.java    From Nicobar with Apache License 2.0
/**
 * Get the last update times of all of the script archives managed by this Repository.
 * @return map of moduleId to last update time
 */
@Override
public Map<ModuleId, Long> getArchiveUpdateTimes() throws IOException {
    Iterable<Row<String, String>> rows;
    try {
        rows = getRows((EnumSet<?>)EnumSet.of(Columns.module_id, Columns.last_update));
    } catch (Exception e) {
        throw new IOException(e);
    }
    Map<ModuleId, Long> updateTimes = new LinkedHashMap<ModuleId, Long>();
    for (Row<String, String> row : rows) {
        String moduleId = row.getKey();
        Column<String> lastUpdateColumn = row.getColumns().getColumnByName(Columns.last_update.name());
        Long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : null;
        if (StringUtils.isNotBlank(moduleId) && updateTime != null) {
            updateTimes.put(ModuleId.fromString(moduleId), updateTime);
        }
    }
    return updateTimes;
}
 
Example #4
Source File: CassandraStoreImpl.java    From recipes-rss with Apache License 2.0
/**
 * Get the feed urls from Cassandra
 */
@Override
public List<String> getSubscribedUrls(String userId) throws Exception{
    OperationResult<ColumnList<String>> response;
    try {
        response = getKeyspace().prepareQuery(CF_SUBSCRIPTIONS).getKey(userId).execute();
    } catch (NotFoundException e) {
        logger.error("No record found for this user: " + userId);
        throw e;
    } catch (Exception e) {
        logger.error("Exception occurred when fetching from Cassandra", e);
        throw e;
    }

    final List<String> items = new ArrayList<String>();
    if (response != null) {
        final ColumnList<String> columns = response.getResult();
        for (Column<String> column : columns) {
            items.add(column.getName());
        }
    }

    return items;
}
 
Example #5
Source File: MvccEntitySerializationStrategyV3Impl.java    From usergrid with Apache License 2.0
@Override
public MvccEntity parseColumn( Column<Boolean> column ) {

    final EntityWrapper deSerialized;

    try {
        deSerialized = column.getValue( entityJsonSerializer );
    }
    catch ( DataCorruptionException e ) {
        log.error(
                "DATA CORRUPTION DETECTED when de-serializing entity with Id {}.  This means the"
                        + " write was truncated.", id, e );
        //return an empty entity, we can never load this one, and we don't want it to bring the system
        //to a grinding halt
        //TODO fix this
        return new MvccEntityImpl( id, UUIDGenerator.newTimeUUID(), MvccEntity.Status.DELETED, Optional.<Entity>absent() );
    }
    Optional<Entity> entity = deSerialized.getOptionalEntity();
    return new MvccEntityImpl( id, deSerialized.getVersion(), deSerialized.getStatus(), entity, deSerialized.getSize());
}
 
Example #6
Source File: MvccEntitySerializationStrategyImpl.java    From usergrid with Apache License 2.0
@Override
public MvccEntity parseColumn( Column<UUID> column ) {

    final EntityWrapper deSerialized;
    final UUID version = column.getName();

    try {
        deSerialized = column.getValue( entityJsonSerializer );
    }
    catch ( DataCorruptionException e ) {
        logger.error(
                "DATA CORRUPTION DETECTED when de-serializing entity with Id {} and version {}.  This means the write was truncated.",
                id, version, e );
        //return an empty entity, we can never load this one, and we don't want it to bring the system
        //to a grinding halt
        return new MvccEntityImpl( id, version, MvccEntity.Status.DELETED, Optional.<Entity>absent(),0 );
    }

    //Inject the id into it.
    if ( deSerialized.entity.isPresent() ) {
        EntityUtils.setId( deSerialized.entity.get(), id );
    }

    return new MvccEntityImpl( id, version, deSerialized.status, deSerialized.entity, 0 );
}
 
Example #7
Source File: AShardStateIO.java    From blueflood with Apache License 2.0
@Override
public Collection<SlotState> getShardState(int shard) throws IOException {
    AstyanaxIO astyanaxIO = AstyanaxIO.singleton();
    Timer.Context ctx = Instrumentation.getReadTimerContext(CassandraModel.CF_METRICS_STATE_NAME);
    final Collection<SlotState> slotStates = new LinkedList<SlotState>();
    try {
        ColumnList<SlotState> columns = astyanaxIO.getKeyspace().prepareQuery(CassandraModel.CF_METRICS_STATE)
                .getKey((long)shard)
                .execute()
                .getResult();

        for (Column<SlotState> column : columns) {
            slotStates.add(column.getName()
                             .withTimestamp(column.getLongValue())
                             .withLastUpdatedTimestamp(column.getTimestamp() / 1000)); //write time is in micro seconds
        }
    } catch (ConnectionException e) {
        Instrumentation.markReadError(e);
        LOG.error("Error getting shard state for shard " + shard, e);
        throw new IOException(e);
    } finally {
        ctx.stop();
    }
    return slotStates;
}
 
Example #8
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
@Override
public Iterator<? extends HistoryMigrationScanResult> getHistoriesForStorage(AstyanaxStorage source) {

    DeltaPlacement placement = (DeltaPlacement) source.getPlacement();
    ColumnFamily<ByteBuffer, UUID> cf = placement.getDeltaHistoryColumnFamily();

    return Iterators.concat(Iterators.transform(source.scanIterator(null), keyRange -> {
        Iterator<Row<ByteBuffer, UUID>> rows =
                rowScan(placement, cf, keyRange, _maxColumnsRange, LimitCounter.max(), ReadConsistency.STRONG);

        return Iterators.concat(Iterators.transform(rows, row -> {
            ColumnList<UUID> columns = row.getColumns();
            Iterator<Column<UUID>> concatColumns = columns.iterator();
            if (columns.size() >= _maxColumnsRange.getLimit()) {
                UUID lastColumn = row.getColumns().getColumnByIndex(columns.size() - 1).getName();
                concatColumns = Iterators.concat(concatColumns, columnScan(row.getRawKey(), placement, cf, lastColumn, null,
                        false, _uuidInc, Long.MAX_VALUE, 1, ReadConsistency.STRONG));
            }
            return Iterators.transform(concatColumns, column -> new HistoryMigrationScanResult(row.getRawKey(), column.getName(), column.getByteBufferValue(), column.getTtl()));
        }));
    }));
}
 
Example #9
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
@Override
public Iterator<? extends MigrationScanResult> getDeltasForStorage(AstyanaxStorage source) {
    DeltaPlacement sourcePlacement = (DeltaPlacement) source.getPlacement();
    ColumnFamily<ByteBuffer, DeltaKey> sourceCf = sourcePlacement.getBlockedDeltaColumnFamily();

    Iterator<ByteBufferRange> scanIter = source.scanIterator(null);

    return Iterators.concat(Iterators.transform(scanIter, keyRange -> {
        Iterator<Row<ByteBuffer, DeltaKey>> rows =
                rowScan(sourcePlacement, sourceCf, keyRange, _maxColumnsRange, LimitCounter.max(), ReadConsistency.STRONG);

        return Iterators.concat(Iterators.transform(rows, row -> {
            ColumnList<DeltaKey> columns = row.getColumns();
            Iterator<Column<DeltaKey>> concatColumns = columns.iterator();
            if (columns.size() >= _maxColumnsRange.getLimit()) {
                DeltaKey lastColumn = row.getColumns().getColumnByIndex(columns.size() - 1).getName();
                concatColumns = Iterators.concat(concatColumns, columnScan(row.getRawKey(), sourcePlacement, sourceCf, lastColumn, null,
                        false, _deltaKeyInc, Long.MAX_VALUE, 1, ReadConsistency.STRONG));
            }

            Iterator<StitchedColumn> uuidColumns = new AstyanaxDeltaIterator(concatColumns, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex(row.getRawKey()));

            return Iterators.transform(uuidColumns, column -> new MigrationScanResult(row.getRawKey(), column.getName(), _daoUtils.skipPrefix(column.getByteBufferValue())));
        }));
    }));
}
 
Example #10
Source File: AstyanaxStorageProvider.java    From emodb with Apache License 2.0
private static void deleteDataColumns(AstyanaxTable table, String blobId, ColumnList<Composite> columns, ConsistencyLevel consistency, Long timestamp) {
    for (AstyanaxStorage storage : table.getWriteStorage()) {
        BlobPlacement placement = (BlobPlacement) storage.getPlacement();

        // Any columns with a timestamp older than the one we expect must be from an old version
        // of the blob.  This should be rare, but if it happens clean up and delete the old data.
        MutationBatch mutation = placement.getKeyspace().prepareMutationBatch(consistency);
        ColumnListMutation<Composite> row = mutation.withRow(
                placement.getBlobColumnFamily(), storage.getRowKey(blobId));
        boolean found = false;
        for (Column<Composite> column : columns) {
            if (null != timestamp && column.getTimestamp() < timestamp) {
                if (ColumnGroup.B.name().equals(column.getName().get(0, AsciiSerializer.get()))) {
                    int chunkId = column.getName().get(1, IntegerSerializer.get());
                    row.deleteColumn(getColumn(ColumnGroup.B, chunkId))
                            .deleteColumn(getColumn(ColumnGroup.Z, chunkId));
                    found = true;
                }
            }
        }
        if (found) {
            execute(mutation);
        }
    }
}
 
Example #11
Source File: AstyanaxEventReaderDAO.java    From emodb with Apache License 2.0
void readAll(String channel, SlabFilter filter, EventSink sink, boolean weak) {
    // PeekingIterator is needed so that we can look ahead and see the next slab Id
    PeekingIterator<Column<ByteBuffer>> manifestColumns = Iterators.peekingIterator(readManifestForChannel(channel, weak));

    while (manifestColumns.hasNext()) {
        Column<ByteBuffer> manifestColumn = manifestColumns.next();
        ByteBuffer slabId = manifestColumn.getName();
        ByteBuffer nextSlabId = manifestColumns.hasNext() ? manifestColumns.peek().getName() : null;
        boolean open = manifestColumn.getBooleanValue();
        if (filter != null && !filter.accept(slabId, open, nextSlabId)) {
            continue;
        }
        if (!readSlab(channel, slabId, new SlabCursor(), open, sink)) {
            break;
        }
    }
}
 
Example #12
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
    // Finding the max using a reversed column range shouldn't have to worry about skipping tombstones since
    // we always delete smaller column values before deleting larger column values--scanning will hit the max
    // before needing to skip over tombstones.
    Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
    for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
        Rows<UUID, ByteBuffer> rows = execute(
                _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                        .getKeySlice(batch)
                        .withColumnRange(new RangeBuilder()
                                .setReversed(true)
                                .setLimit(1)
                                .build()));
        for (Row<UUID, ByteBuffer> row : rows) {
            UUID dataId = row.getKey();
            for (Column<ByteBuffer> column : row.getColumns()) {
                resultMap.put(dataId, column.getName());
            }
        }
    }
    return resultMap;
}
 
Example #13
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Iterator<ByteBuffer> scanRecords(UUID dataId, @Nullable ByteBuffer from, @Nullable final ByteBuffer to,
                                        int batchSize, int limit) {
    final Iterator<Column<ByteBuffer>> iter = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(dataId)
                    .withColumnRange(new RangeBuilder()
                            .setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
                            .setEnd(Objects.firstNonNull(to, EMPTY_BUFFER))
                            .setLimit(batchSize)
                            .build())
                    .autoPaginate(true));

    return Iterators.limit(new AbstractIterator<ByteBuffer>() {
        @Override
        protected ByteBuffer computeNext() {
            while (iter.hasNext()) {
                ByteBuffer record = iter.next().getName();
                if (!record.equals(to)) {  // To is exclusive
                    return record;
                }
            }
            return endOfData();
        }
    }, limit);
}
 
Example #14
Source File: SimpleReverseIndexer.java    From staash with Apache License 2.0
@Override
public Map<String, String> getTags(String id) throws IndexerException {
    try {
        ColumnList<String> fields = keyspace.prepareQuery(dataCf).getRow(id).execute().getResult();
        Map<String, String> mapped = Maps.newHashMap();
        for (Column<String> column : fields) {
            mapped.put(column.getName(),  column.getStringValue());
        }
        return mapped;
    } catch (ConnectionException e) {
        throw new IndexerException("Failed to get tags for id " + id, e);
    }
}
 
Example #15
Source File: CassandraStorage.java    From greycat with Apache License 2.0
@Override
public void get(Buffer keys, Callback<Buffer> callback) {
    try {
        BufferIterator it = keys.iterator();
        final List<byte[]> all_keys = new ArrayList<byte[]>();
        final Map<byte[], byte[]> results = new HashMap<byte[], byte[]>();
        while (it.hasNext()) {
            Buffer keyView = it.next();
            if (keyView != null) {
                all_keys.add(keyView.data());
            }
        }
        Rows<byte[], Integer> rows = keyspace.prepareQuery(MWG).getKeySlice(all_keys).execute().getResult();
        for (int i = 0; i < rows.size(); i++) {
            Row<byte[], Integer> row = rows.getRowByIndex(i);
            if (row != null) {
                Column<Integer> col = row.getColumns().getColumnByName(0);
                if (col != null) {
                    results.put(row.getKey(), col.getByteArrayValue());
                }
            }
        }
        Buffer result = graph.newBuffer();
        for (int i = 0; i < all_keys.size(); i++) {
            if (i != 0) {
                result.write(Constants.BUFFER_SEP);
            }
            byte[] resolved = results.get(all_keys.get(i));
            if (resolved != null) {
                result.writeAll(resolved);
            }
        }
        callback.on(result);
    } catch (Exception e) {
        e.printStackTrace();
    }

}
 
Example #16
Source File: CassandraArchiveRepository.java    From Nicobar with Apache License 2.0
private ScriptModuleSpec getModuleSpec(ColumnList<String> columns) {
    ScriptModuleSpec moduleSpec = null;
    if (columns != null) {
        Column<String> moduleSpecColumn = columns.getColumnByName(Columns.module_spec.name());
        if (moduleSpecColumn != null && moduleSpecColumn.hasValue()) {
            String moduleSpecString = moduleSpecColumn.getStringValue();
            moduleSpec = getConfig().getModuleSpecSerializer().deserialize(moduleSpecString);
        }
    }
    return moduleSpec;
}
 
Example #17
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
@Override
public Map<UUID, String> loadSegments(String queue) {
    Map<UUID, String> resultMap = Maps.newHashMap();
    Iterator<Column<UUID>> iter = executePaginated(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(queue)
                    .withColumnRange(new RangeBuilder().setLimit(100).build())
                    .autoPaginate(true));
    while (iter.hasNext()) {
        Column<UUID> column = iter.next();
        resultMap.put(column.getName(), column.getStringValue());
    }
    return resultMap;
}
 
Example #18
Source File: NodeSerializationImpl.java    From usergrid with Apache License 2.0
@Override
public Optional<Long> getMaxVersion( final ApplicationScope scope, final Id node ) {
    ValidationUtils.validateApplicationScope( scope );
    ValidationUtils.verifyIdentity( node );

    ColumnFamilyQuery<ScopedRowKey<Id>, Boolean> query =
            keyspace.prepareQuery( GRAPH_DELETE ).setConsistencyLevel( fig.getReadCL() );

    Column<Boolean> result = null;
    try {
        result = query.getKey( ScopedRowKey.fromKey( scope.getApplication(), node ) ).getColumn( COLUMN_NAME ).execute()
                .getResult();
    }
    catch ( NotFoundException nfe ) {
        //swallow, there's just no column
        return Optional.absent();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to connect to Cassandra", e );
    }

    return Optional.of( result.getLongValue() );
}
 
Example #19
Source File: AstyanaxQueueDAO.java    From emodb with Apache License 2.0
/** Executes a {@code RowQuery} with {@code autoPaginate(true)} repeatedly as necessary to fetch all pages. */
private <K, C> Iterator<Column<C>> executePaginated(final RowQuery<K, C> query) {
    return Iterators.concat(new AbstractIterator<Iterator<Column<C>>>() {
        @Override
        protected Iterator<Column<C>> computeNext() {
            ColumnList<C> page = execute(query);
            return !page.isEmpty() ? page.iterator() : endOfData();
        }
    });
}
 
Example #20
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public QueryResult listRows(String cursor, Integer rowLimit, Integer columnLimit) throws PaasException {
    try {
        invariant();
        
        // Execute the query
        Partitioner partitioner = keyspace.getPartitioner();
        Rows<ByteBuffer, ByteBuffer> result = keyspace
            .prepareQuery(columnFamily)
            .getKeyRange(null,  null, cursor != null ? cursor : partitioner.getMinToken(),  partitioner.getMaxToken(),  rowLimit)
            .execute()
            .getResult();
        
        // Convert raw data into a simple sparse tree
        SchemalessRows.Builder builder = SchemalessRows.builder();
        for (Row<ByteBuffer, ByteBuffer> row : result) { 
            Map<String, String> columns = Maps.newHashMap();
            for (Column<ByteBuffer> column : row.getColumns()) {
                columns.put(serializers.columnAsString(column.getRawName()), serializers.valueAsString(column.getRawName(), column.getByteBufferValue()));
            }
            builder.addRow(serializers.keyAsString(row.getKey()), columns);
        }
        
        QueryResult dr = new QueryResult();
        dr.setSrows(builder.build());
        
        if (!result.isEmpty()) {
            dr.setCursor(partitioner.getTokenForKey(Iterables.getLast(result).getKey()));
        }
        return dr;
    } catch (ConnectionException e) {
        // TODO: surface connection failures to the caller instead of swallowing them
        e.printStackTrace();
    }
    return null;
}
 
Example #21
Source File: AstyanaxSupport.java    From brooklyn-library with Apache License 2.0
/**
 * Read from a {@link CassandraNode} using the Astyanax API.
 * @throws ConnectionException 
 */
public void readData(String keyspaceName) throws ConnectionException {
    // Create context
    AstyanaxContext<Keyspace> context = newAstyanaxContextForKeyspace(keyspaceName);
    try {
        Keyspace keyspace = context.getEntity();

        // Query data
        OperationResult<ColumnList<String>> query = keyspace.prepareQuery(sampleColumnFamily)
                .getKey("one")
                .execute();
        assertEquals(query.getHost().getHostName(), hostname);
        assertTrue(query.getLatency() > 0L);

        ColumnList<String> columns = query.getResult();
        assertEquals(columns.size(), 2);

        // Lookup columns in response by name
        String name = columns.getColumnByName("name").getStringValue();
        assertEquals(name, "Alice");

        // Iterate through the columns
        for (Column<String> c : columns) {
            assertTrue(ImmutableList.of("name", "company").contains(c.getName()));
        }
    } finally {
        context.shutdown();
    }
}
 
Example #22
Source File: MultiScanCutoffTimeTest.java    From emodb with Apache License 2.0
private Column<DeltaKey> astyanaxColumn(UUID uuidValue, String value) {
    Column<DeltaKey> column = mock(Column.class);
    when(column.getName()).thenReturn(new DeltaKey(uuidValue, 0));
    when(column.getStringValue()).thenReturn(value);

    return column;
}
 
Example #23
Source File: MultiScanCutoffTimeTest.java    From emodb with Apache License 2.0
@Test
public void testAstyanaxColumnFilteringBasedOnCutoffTime()
        throws Exception {
    long nowInTimeMillis = System.currentTimeMillis();

    UUID uuid1 = TimeUUIDs.uuidForTimeMillis(nowInTimeMillis);
    UUID uuid2 = TimeUUIDs.uuidForTimeMillis(nowInTimeMillis + 5000);
    UUID uuid3 = TimeUUIDs.uuidForTimeMillis(nowInTimeMillis + 10000);
    UUID uuid4 = TimeUUIDs.uuidForTimeMillis(nowInTimeMillis + 15000);
    UUID uuid5 = TimeUUIDs.uuidForTimeMillis(nowInTimeMillis + 20000);

    Column<DeltaKey> col1 = astyanaxColumn(uuid1, "a");
    Column<DeltaKey> col2 = astyanaxColumn(uuid2, "b");
    Column<DeltaKey> col3 = astyanaxColumn(uuid3, "c");
    Column<DeltaKey> col4 = astyanaxColumn(uuid4, "d");
    Column<DeltaKey> col5 = astyanaxColumn(uuid5, "e");

    final Iterable<Column<DeltaKey>> columns = Arrays.asList(col1, col2, col3, col4, col5);
    assertEquals(Iterators.size(columns.iterator()), 5);

    Iterator<Column<DeltaKey>> filteredColumnIter = AstyanaxBlockedDataReaderDAO.getFilteredColumnIter(columns.iterator(), null);
    assertEquals(Iterators.size(filteredColumnIter), 5);

    filteredColumnIter = AstyanaxBlockedDataReaderDAO.getFilteredColumnIter(columns.iterator(), Instant.ofEpochMilli(nowInTimeMillis - 1000));
    assertEquals(Iterators.size(filteredColumnIter), 0);

    filteredColumnIter = AstyanaxBlockedDataReaderDAO.getFilteredColumnIter(columns.iterator(), Instant.ofEpochMilli(nowInTimeMillis + 12000));
    assertEquals(Iterators.size(filteredColumnIter), 3);

    filteredColumnIter = AstyanaxBlockedDataReaderDAO.getFilteredColumnIter(columns.iterator(), Instant.ofEpochMilli(nowInTimeMillis + 21000));
    assertEquals(Iterators.size(filteredColumnIter), 5);
}
 
Example #24
Source File: AstyanaxEventReaderDAO.java    From emodb with Apache License 2.0
/**
 * Reads the ordered manifest for a channel.  The read can either be weak or strong.  A weak read will use CL1
 * and may use the cached oldest slab from a previous strong call to improve performance.  A strong read will use
 * CL local_quorum and will always read the entire manifest row.  This makes a weak read significantly faster than a
 * strong read but also means the call is not guaranteed to return the entire manifest.  Because of this at least
 * every 10 seconds a weak read for a channel is automatically promoted to a strong read.
 *
 * The vast majority of calls to this method are performed during a "peek" or "poll" operation.  Since these are
 * typically called repeatedly a weak call provides improved performance while guaranteeing that at least every
 * 10 seconds the manifest is strongly read so no slabs are missed over time.  Calls which must guarantee
 * the full manifest should explicitly request strong consistency.
 */
private Iterator<Column<ByteBuffer>> readManifestForChannel(final String channel, final boolean weak) {
    final ByteBuffer oldestSlab = weak ? _oldestSlab.getIfPresent(channel) : null;
    final ConsistencyLevel consistency;

    RangeBuilder range = new RangeBuilder().setLimit(50);
    if (oldestSlab != null) {
        range.setStart(oldestSlab);
        consistency = ConsistencyLevel.CL_LOCAL_ONE;
    } else {
        consistency = ConsistencyLevel.CL_LOCAL_QUORUM;
    }

    final Iterator<Column<ByteBuffer>> manifestColumns = executePaginated(
            _keyspace.prepareQuery(ColumnFamilies.MANIFEST, consistency)
                    .getKey(channel)
                    .withColumnRange(range.build())
                    .autoPaginate(true));

    if (oldestSlab != null) {
        // Query was executed weakly using the cached oldest slab, so don't update the cache with an unreliable oldest value
        return manifestColumns;
    } else {
        PeekingIterator<Column<ByteBuffer>> peekingManifestColumns = Iterators.peekingIterator(manifestColumns);
        if (peekingManifestColumns.hasNext()) {
            // Cache the first slab returned from querying the full manifest column family since it is the oldest.
            cacheOldestSlabForChannel(channel, TimeUUIDSerializer.get().fromByteBuffer(peekingManifestColumns.peek().getName()));
            return peekingManifestColumns;
        } else {
            // Channel was completely empty.  Cache a TimeUUID for the current time.  This will cause future calls
            // to read at most 1 minute of tombstones until the cache expires 10 seconds later.
            cacheOldestSlabForChannel(channel, TimeUUIDs.newUUID());
            return Iterators.emptyIterator();
        }
    }
}
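Note that the "at least every 10 seconds" promotion described in the javadoc is not visible in this method: it falls out of how the _oldestSlab cache is populated and expired. Below is a hedged sketch of how such a cache could be wired up with Guava; the field name matches the code above, but the exact configuration is an assumption beyond what the javadoc states.

// Hypothetical sketch: entries expire 10 seconds after being written, so getIfPresent()
// returns null at least every 10 seconds and the next weak read is promoted to a strong one.
private final Cache<String, ByteBuffer> _oldestSlab = CacheBuilder.newBuilder()
        .expireAfterWrite(10, TimeUnit.SECONDS)
        .build();

private void cacheOldestSlabForChannel(String channel, UUID slabId) {
    _oldestSlab.put(channel, TimeUUIDSerializer.get().toByteBuffer(slabId));
}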
 
Example #25
Source File: AstyanaxEventReaderDAO.java    From emodb with Apache License 2.0
/** Executes a {@code RowQuery} with {@code autoPaginate(true)} repeatedly as necessary to fetch all pages. */
private <K, C> Iterator<Column<C>> executePaginated(final RowQuery<K, C> query) {
    return Iterators.concat(new AbstractIterator<Iterator<Column<C>>>() {
        @Override
        protected Iterator<Column<C>> computeNext() {
            ColumnList<C> page = execute(query);
            return !page.isEmpty() ? page.iterator() : endOfData();
        }
    });
}
 
Example #26
Source File: AstyanaxEventReaderDAO.java    From emodb with Apache License 2.0
@Override
public boolean moveIfFast(String fromChannel, String toChannel) {
    Iterator<Column<ByteBuffer>> manifestColumns = executePaginated(
            _keyspace.prepareQuery(ColumnFamilies.MANIFEST, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(fromChannel)
                    .withColumnRange(new RangeBuilder().setLimit(50).build())
                    .autoPaginate(true));

    List<ByteBuffer> closedSlabs = Lists.newArrayList();
    boolean movedAll = true;
    while (manifestColumns.hasNext()) {
        Column<ByteBuffer> manifestColumn = manifestColumns.next();
        ByteBuffer slabId = manifestColumn.getName();
        boolean open = manifestColumn.getBooleanValue();
        if (open) {
            // Can't safely re-assign open slabs to another channel since writers may still be writing.
            movedAll = false;  // All events in the open slab might be deleted, but don't check for that here.
            continue;
        }
        closedSlabs.add(slabId);
        if (closedSlabs.size() >= SLAB_MOVE_BATCH) {
            _manifestPersister.move(fromChannel, toChannel, closedSlabs, false);
            closedSlabs.clear();
        }
    }
    if (!closedSlabs.isEmpty()) {
        _manifestPersister.move(fromChannel, toChannel, closedSlabs, false);
    }

    return movedAll;
}
 
Example #27
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
@VisibleForTesting
public static Iterator<Column<DeltaKey>> getFilteredColumnIter(Iterator<Column<DeltaKey>> columnIter, @Nullable Instant cutoffTime) {
    if (cutoffTime == null) {
        return columnIter;
    }
    return Iterators.filter(columnIter, column -> (TimeUUIDs.getTimeMillis(column.getName().getChangeId()) < cutoffTime.toEpochMilli()));
}
 
Example #28
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
private Iterator<RecordEntryRawMetadata> rawMetadata(final Iterator<StitchedColumn> iter) {
    return Iterators.transform(iter, new Function<Column<UUID>, RecordEntryRawMetadata>() {
        @Override
        public RecordEntryRawMetadata apply(Column<UUID> column) {
            return new RecordEntryRawMetadata()
                    .withTimestamp(TimeUUIDs.getTimeMillis(column.getName()))
                    .withSize(_daoUtils.skipPrefix(column.getByteBufferValue()).remaining());
        }
    });
}
 
Example #29
Source File: AstyanaxBlockedDataReaderDAO.java    From emodb with Apache License 2.0
private Record newRecord(Key key, ByteBuffer rowKey, ColumnList<DeltaKey> columns, int largeRowThreshold, ReadConsistency consistency, @Nullable final Instant cutoffTime) {

    Iterator<Column<DeltaKey>> changeIter = getFilteredColumnIter(columns.iterator(), cutoffTime);
    Iterator<Column<DeltaKey>> compactionIter = getFilteredColumnIter(columns.iterator(), cutoffTime);
    Iterator<Column<DeltaKey>> rawMetadataIter = getFilteredColumnIter(columns.iterator(), cutoffTime);

    if (columns.size() >= largeRowThreshold) {
        // A large row such that the first query likely returned only a subset of all the columns.  Lazily fetch
        // the rest while ensuring we never load all columns into memory at the same time.  The current
        // Compactor+Resolver implementation must scan the row twice: once to find compaction records and once to
        // find deltas.  So we must call columnScan() twice, once for each.
        DeltaKey lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();

        AstyanaxTable table = (AstyanaxTable) key.getTable();
        AstyanaxStorage storage = table.getReadStorage();
        DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
        ColumnFamily<ByteBuffer, DeltaKey> columnFamily = placement.getBlockedDeltaColumnFamily();

        // Execute the same scan 3 times, returning 3 iterators that process the results in different ways.  In
        // practice at most two of the iterators are actually consumed (one or more is ignored) so the columnScan
        // should avoid actually doing any work until the first item is fetched from the iterator.
        changeIter = Iterators.concat(changeIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
        compactionIter = Iterators.concat(compactionIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
        rawMetadataIter = Iterators.concat(rawMetadataIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
    }

    Iterator<Map.Entry<DeltaClusteringKey, Change>> deltaChangeIter = decodeChanges(new AstyanaxDeltaIterator(changeIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex(rowKey)));
    Iterator<Map.Entry<DeltaClusteringKey, Compaction>> deltaCompactionIter = decodeCompactions(new AstyanaxDeltaIterator(compactionIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex(rowKey)));
    Iterator<RecordEntryRawMetadata> deltaRawMetadataIter = rawMetadata(new AstyanaxDeltaIterator(rawMetadataIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex(rowKey)));

    return new RecordImpl(key, deltaCompactionIter, deltaChangeIter, deltaRawMetadataIter);
}
 
Example #30
Source File: StringColumnParser.java    From usergrid with Apache License 2.0
@Override
public String parseColumn( final Column<String> column ) {
    return column.getName();
}