com.netflix.astyanax.model.ColumnList Java Examples
The following examples show how to use com.netflix.astyanax.model.ColumnList.
Each example notes its source file, originating project, and license.
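Before the project examples, here is a minimal, self-contained sketch of the typical read path that produces a ColumnList: prepare a row query against a column family, execute it, then access the resulting columns by index, by name, or by iteration. The "users" column family, its serializers, and the row key are illustrative assumptions, not taken from any of the projects below.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.serializers.StringSerializer;

public class ColumnListReadSketch {

    // Hypothetical column family; the name and serializers are assumptions for illustration only.
    private static final ColumnFamily<String, String> CF_USERS =
            ColumnFamily.newColumnFamily("users", StringSerializer.get(), StringSerializer.get());

    /** Reads one row and demonstrates the common ColumnList accessors used throughout the examples below. */
    public static void printRow(Keyspace keyspace, String rowKey) throws ConnectionException {
        OperationResult<ColumnList<String>> result = keyspace.prepareQuery(CF_USERS)
                .getKey(rowKey)
                .execute();

        ColumnList<String> columns = result.getResult();
        if (columns.isEmpty()) {
            System.out.println("Row not found or has no columns: " + rowKey);
            return;
        }

        // Positional and named access.
        Column<String> first = columns.getColumnByIndex(0);
        System.out.println("first column: " + first.getName() + " = " + first.getStringValue());
        System.out.println("name column:  " + columns.getStringValue("name", "<missing>"));

        // Iteration over all columns in the row.
        for (Column<String> column : columns) {
            System.out.println(column.getName() + " -> " + column.getStringValue());
        }
    }
}

A Keyspace instance is typically obtained from an AstyanaxContext, as Example #24 below shows.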
Example #1
Source File: AstyanaxStorageProvider.java From emodb with Apache License 2.0
@ParameterizedTimed(type = "AstyanaxStorageProvider")
@Override
public void deleteObject(Table tbl, String blobId) {
    AstyanaxTable table = (AstyanaxTable) Objects.requireNonNull(tbl, "table");
    for (AstyanaxStorage storage : table.getWriteStorage()) {
        BlobPlacement placement = (BlobPlacement) storage.getPlacement();

        // Do a column range query on all the B and Z columns.  Don't get the A columns with the metadata.
        Composite start = getColumnPrefix(ColumnGroup.B, Composite.ComponentEquality.LESS_THAN_EQUAL);
        Composite end = getColumnPrefix(ColumnGroup.Z, Composite.ComponentEquality.GREATER_THAN_EQUAL);
        ColumnList<Composite> columns = execute(placement.getKeyspace()
                .prepareQuery(placement.getBlobColumnFamily(), _readConsistency)
                .getKey(storage.getRowKey(blobId))
                .withColumnRange(start, end, false, Integer.MAX_VALUE));

        deleteDataColumns(table, blobId, columns, CONSISTENCY_STRONG, null);

        _blobDeleteMeter.mark();
    }
}
Example #2
Source File: AstyanaxStorageProvider.java From emodb with Apache License 2.0
private static StorageSummary toStorageSummary(ColumnList<Composite> columns) {
    if (columns.size() == 0) {
        return null;
    }

    // Read the summary column with the attributes, length etc.
    Column<Composite> summaryColumn = columns.getColumnByIndex(0);
    if (summaryColumn == null || !matches(summaryColumn.getName(), ColumnGroup.A, 0)) {
        return null;
    }
    StorageSummary summary = JsonHelper.fromJson(summaryColumn.getStringValue(), StorageSummary.class);

    // Check that all the chunks are available.  Some may still be in the process of being written or replicated.
    if (columns.size() < 1 + summary.getChunkCount()) {
        return null;
    }
    for (int chunkId = 0; chunkId < summary.getChunkCount(); chunkId++) {
        Column<Composite> presence = columns.getColumnByIndex(chunkId + 1);
        if (presence == null ||
                !matches(presence.getName(), ColumnGroup.B, chunkId) ||
                presence.getTimestamp() != summary.getTimestamp()) {
            return null;
        }
    }
    return summary;
}
Example #3
Source File: AstyanaxStorageProvider.java From emodb with Apache License 2.0
private static void deleteDataColumns(AstyanaxTable table, String blobId, ColumnList<Composite> columns,
                                      ConsistencyLevel consistency, Long timestamp) {
    for (AstyanaxStorage storage : table.getWriteStorage()) {
        BlobPlacement placement = (BlobPlacement) storage.getPlacement();

        // Any columns with a timestamp older than the one we expect must be from an old version
        // of the blob.  This should be rare, but if it happens clean up and delete the old data.
        MutationBatch mutation = placement.getKeyspace().prepareMutationBatch(consistency);
        ColumnListMutation<Composite> row = mutation.withRow(
                placement.getBlobColumnFamily(), storage.getRowKey(blobId));
        boolean found = false;
        for (Column<Composite> column : columns) {
            if (null != timestamp && column.getTimestamp() < timestamp) {
                if (ColumnGroup.B.name().equals(column.getName().get(0, AsciiSerializer.get()))) {
                    int chunkId = column.getName().get(1, IntegerSerializer.get());
                    row.deleteColumn(getColumn(ColumnGroup.B, chunkId))
                            .deleteColumn(getColumn(ColumnGroup.Z, chunkId));
                    found = true;
                }
            }
        }
        if (found) {
            execute(mutation);
        }
    }
}
Example #4
Source File: CassandraArchiveRepository.java From Nicobar with Apache License 2.0
/**
 * Get a summary of all archives in this Repository
 * @return List of summaries
 */
@Override
public List<ArchiveSummary> getArchiveSummaries() throws IOException {
    List<ArchiveSummary> summaries = new LinkedList<ArchiveSummary>();
    Iterable<Row<String, String>> rows;
    try {
        rows = getRows((EnumSet<?>) EnumSet.of(Columns.module_id, Columns.last_update, Columns.module_spec));
    } catch (Exception e) {
        throw new IOException(e);
    }
    for (Row<String, String> row : rows) {
        String moduleId = row.getKey();
        ColumnList<String> columns = row.getColumns();
        Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
        long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : 0;
        ScriptModuleSpec moduleSpec = getModuleSpec(columns);
        ArchiveSummary summary = new ArchiveSummary(ModuleId.fromString(moduleId), moduleSpec, updateTime, null);
        summaries.add(summary);
    }
    return summaries;
}
Example #5
Source File: Migration.java From blueflood with Apache License 2.0
private static void checkSameResults(ColumnList<Long> x, ColumnList<Long> y) throws Exception {
    if (x.size() != y.size()) {
        throw new Exception("source and destination column lengths do not match");
    }
    if (Sets.difference(new HashSet<Long>(x.getColumnNames()), new HashSet<Long>(y.getColumnNames())).size() != 0) {
        throw new Exception("source and destination did not contain the same column names");
    }

    for (int i = 0; i < x.size(); i++) {
        byte[] bx = x.getColumnByIndex(i).getByteArrayValue();
        byte[] by = y.getColumnByIndex(i).getByteArrayValue();
        if (bx.length != by.length) {
            throw new Exception("source and destination column values did not match for column " + i);
        }
        // only examine every third byte.
        for (int j = 0; j < bx.length; j += 3) {
            if (bx[j] != by[j]) {
                throw new Exception("source and destination column values did not match for column " + i);
            }
        }
    }
}
Example #6
Source File: CassandraStoreImpl.java From recipes-rss with Apache License 2.0
/**
 * Get the feed urls from Cassandra
 */
@Override
public List<String> getSubscribedUrls(String userId) throws Exception {
    OperationResult<ColumnList<String>> response;
    try {
        response = getKeyspace().prepareQuery(CF_SUBSCRIPTIONS).getKey(userId).execute();
    } catch (NotFoundException e) {
        logger.error("No record found for this user: " + userId);
        throw e;
    } catch (Exception t) {
        logger.error("Exception occurred when fetching from Cassandra: " + t);
        throw t;
    }

    final List<String> items = new ArrayList<String>();
    if (response != null) {
        final ColumnList<String> columns = response.getResult();
        for (Column<String> column : columns) {
            items.add(column.getName());
        }
    }
    return items;
}
Example #7
Source File: AShardStateIO.java From blueflood with Apache License 2.0
@Override
public Collection<SlotState> getShardState(int shard) throws IOException {
    AstyanaxIO astyanaxIO = AstyanaxIO.singleton();
    Timer.Context ctx = Instrumentation.getReadTimerContext(CassandraModel.CF_METRICS_STATE_NAME);
    final Collection<SlotState> slotStates = new LinkedList<SlotState>();
    try {
        ColumnList<SlotState> columns = astyanaxIO.getKeyspace().prepareQuery(CassandraModel.CF_METRICS_STATE)
                .getKey((long) shard)
                .execute()
                .getResult();

        for (Column<SlotState> column : columns) {
            slotStates.add(column.getName()
                    .withTimestamp(column.getLongValue())
                    .withLastUpdatedTimestamp(column.getTimestamp() / 1000)); // write time is in microseconds
        }
    } catch (ConnectionException e) {
        Instrumentation.markReadError(e);
        LOG.error("Error getting shard state for shard " + shard, e);
        throw new IOException(e);
    } finally {
        ctx.stop();
    }

    return slotStates;
}
Example #8
Source File: AstyanaxBlockedDataReaderDAO.java From emodb with Apache License 2.0
@Timed(name = "bv.emodb.sor.AstyanaxDataReaderDAO.read", absolute = true)
@Override
public Record read(Key key, ReadConsistency consistency) {
    checkNotNull(key, "key");
    checkNotNull(consistency, "consistency");

    AstyanaxTable table = (AstyanaxTable) key.getTable();
    AstyanaxStorage storage = table.getReadStorage();
    DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
    ByteBuffer rowKey = storage.getRowKey(key.getKey());

    // Query for Delta & Compaction info, just the first 50 columns for now.
    ColumnList<DeltaKey> columns = execute(placement.getKeyspace()
                    .prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
                    .getKey(rowKey)
                    .withColumnRange(_maxColumnsRange),
            "read record at placement %s, table %s, key %s",
            placement.getName(), table.getName(), key.getKey());

    // Track metrics
    _randomReadMeter.mark();

    // Convert the results into a Record object, lazily fetching the rest of the columns as necessary.
    return newRecord(key, rowKey, columns, _maxColumnsRange.getLimit(), consistency, null);
}
Example #9
Source File: AstyanaxBlockedDataReaderDAO.java From emodb with Apache License 2.0
@Override
public Iterator<? extends MigrationScanResult> getDeltasForStorage(AstyanaxStorage source) {
    DeltaPlacement sourcePlacement = (DeltaPlacement) source.getPlacement();
    ColumnFamily<ByteBuffer, DeltaKey> sourceCf = sourcePlacement.getBlockedDeltaColumnFamily();

    Iterator<ByteBufferRange> scanIter = source.scanIterator(null);

    return Iterators.concat(Iterators.transform(scanIter, keyRange -> {
        Iterator<Row<ByteBuffer, DeltaKey>> rows =
                rowScan(sourcePlacement, sourceCf, keyRange, _maxColumnsRange, LimitCounter.max(), ReadConsistency.STRONG);

        return Iterators.concat(Iterators.transform(rows, row -> {
            ColumnList<DeltaKey> columns = row.getColumns();
            Iterator<Column<DeltaKey>> concatColumns = columns.iterator();
            if (columns.size() >= _maxColumnsRange.getLimit()) {
                DeltaKey lastColumn = row.getColumns().getColumnByIndex(columns.size() - 1).getName();
                concatColumns = Iterators.concat(concatColumns, columnScan(row.getRawKey(), sourcePlacement, sourceCf,
                        lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, ReadConsistency.STRONG));
            }
            Iterator<StitchedColumn> uuidColumns = new AstyanaxDeltaIterator(concatColumns, false, _deltaPrefixLength,
                    ByteBufferUtil.bytesToHex(row.getRawKey()));

            return Iterators.transform(uuidColumns, column ->
                    new MigrationScanResult(row.getRawKey(), column.getName(), _daoUtils.skipPrefix(column.getByteBufferValue())));
        }));
    }));
}
Example #10
Source File: AstyanaxBlockedDataReaderDAO.java From emodb with Apache License 2.0
@Override
public Iterator<? extends HistoryMigrationScanResult> getHistoriesForStorage(AstyanaxStorage source) {
    DeltaPlacement placement = (DeltaPlacement) source.getPlacement();
    ColumnFamily<ByteBuffer, UUID> cf = placement.getDeltaHistoryColumnFamily();

    return Iterators.concat(Iterators.transform(source.scanIterator(null), keyRange -> {
        Iterator<Row<ByteBuffer, UUID>> rows =
                rowScan(placement, cf, keyRange, _maxColumnsRange, LimitCounter.max(), ReadConsistency.STRONG);

        return Iterators.concat(Iterators.transform(rows, row -> {
            ColumnList<UUID> columns = row.getColumns();
            Iterator<Column<UUID>> concatColumns = columns.iterator();
            if (columns.size() >= _maxColumnsRange.getLimit()) {
                UUID lastColumn = row.getColumns().getColumnByIndex(columns.size() - 1).getName();
                concatColumns = Iterators.concat(concatColumns, columnScan(row.getRawKey(), placement, cf,
                        lastColumn, null, false, _uuidInc, Long.MAX_VALUE, 1, ReadConsistency.STRONG));
            }
            return Iterators.transform(concatColumns, column ->
                    new HistoryMigrationScanResult(row.getRawKey(), column.getName(), column.getByteBufferValue(), column.getTtl()));
        }));
    }));
}
Example #11
Source File: MvccLogEntrySerializationStrategyImpl.java From usergrid with Apache License 2.0
@Override
public List<MvccLogEntry> loadReversed( final ApplicationScope applicationScope, final Id entityId,
                                        final UUID minVersion, final int maxSize ) {
    ColumnList<UUID> columns;
    try {
        final Id applicationId = applicationScope.getApplication();

        final ScopedRowKey<K> rowKey = createKey( applicationId, entityId );

        columns = keyspace.prepareQuery( CF_ENTITY_LOG ).getKey( rowKey )
                          .withColumnRange( minVersion, null, true, maxSize ).execute().getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }

    return parseResults( columns, entityId );
}
Example #12
Source File: SimpleReverseIndexer.java From staash with Apache License 2.0
@Override
public Map<String, String> getTags(String id) throws IndexerException {
    try {
        ColumnList<String> fields = keyspace.prepareQuery(dataCf).getRow(id).execute().getResult();
        Map<String, String> mapped = Maps.newHashMap();
        for (Column<String> column : fields) {
            mapped.put(column.getName(), column.getStringValue());
        }
        return mapped;
    } catch (ConnectionException e) {
        throw new IndexerException("Failed to get tags for id " + id, e);
    }
}
Example #13
Source File: AstyanaxStorageProvider.java From emodb with Apache License 2.0
@ParameterizedTimed(type = "AstyanaxStorageProvider")
@Override
public StorageSummary readMetadata(Table tbl, String blobId) {
    AstyanaxTable table = (AstyanaxTable) Objects.requireNonNull(tbl, "table");
    Objects.requireNonNull(blobId, "blobId");
    AstyanaxStorage storage = table.getReadStorage();
    BlobPlacement placement = (BlobPlacement) storage.getPlacement();

    // Do a column range query on all the A and B columns.  Don't get the Z columns with the binary data.
    Composite start = getColumnPrefix(ColumnGroup.A, Composite.ComponentEquality.LESS_THAN_EQUAL);
    Composite end = getColumnPrefix(ColumnGroup.B, Composite.ComponentEquality.GREATER_THAN_EQUAL);
    ColumnList<Composite> columns = execute(placement.getKeyspace()
            .prepareQuery(placement.getBlobColumnFamily(), _readConsistency)
            .getKey(storage.getRowKey(blobId))
            .withColumnRange(start, end, false, Integer.MAX_VALUE));

    StorageSummary summary = toStorageSummary(columns);
    if (summary == null) {
        return null;
    }

    // TODO should be removed for blob s3 migration
    // Cleanup older versions of the blob, if any (unlikely).
    deleteDataColumns(table, blobId, columns, ConsistencyLevel.CL_ANY, summary.getTimestamp());

    _blobMetadataReadMeter.mark();

    return summary;
}
Example #14
Source File: MvccLogEntrySerializationStrategyImpl.java From usergrid with Apache License 2.0
@Override
public List<MvccLogEntry> load( final ApplicationScope collectionScope, final Id entityId, final UUID version,
                                final int maxSize ) {
    Preconditions.checkNotNull( collectionScope, "collectionScope is required" );
    Preconditions.checkNotNull( entityId, "entity id is required" );
    Preconditions.checkNotNull( version, "version is required" );
    Preconditions.checkArgument( maxSize > 0, "max Size must be greater than 0" );

    ColumnList<UUID> columns;
    try {
        final Id applicationId = collectionScope.getApplication();

        final ScopedRowKey<K> rowKey = createKey( applicationId, entityId );

        columns = keyspace.prepareQuery( CF_ENTITY_LOG ).getKey( rowKey )
                          .withColumnRange( version, null, false, maxSize )
                          .execute().getResult();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to load log entries", e );
    }

    return parseResults( columns, entityId );
}
Example #15
Source File: CassandraMutagenImplTest.java From mutagen-cassandra with Apache License 2.0
/**
 *
 */
@Test
public void testData() throws Exception {

    final ColumnFamily<String, String> CF_TEST1 =
            ColumnFamily.newColumnFamily("Test1", StringSerializer.get(), StringSerializer.get());

    ColumnList<String> columns;

    columns = keyspace.prepareQuery(CF_TEST1)
            .getKey("row1")
            .execute()
            .getResult();
    assertEquals("foo", columns.getStringValue("value1", null));
    assertEquals("bar", columns.getStringValue("value2", null));

    columns = keyspace.prepareQuery(CF_TEST1)
            .getKey("row2")
            .execute()
            .getResult();
    assertEquals("chicken", columns.getStringValue("value1", null));
    assertEquals("sneeze", columns.getStringValue("value2", null));

    columns = keyspace.prepareQuery(CF_TEST1)
            .getKey("row3")
            .execute()
            .getResult();
    assertEquals("bar", columns.getStringValue("value1", null));
    assertEquals("baz", columns.getStringValue("value2", null));
}
Example #16
Source File: CassandraArchiveRepository.java From Nicobar with Apache License 2.0
private ScriptModuleSpec getModuleSpec(ColumnList<String> columns) {
    ScriptModuleSpec moduleSpec = null;
    if (columns != null) {
        Column<String> moduleSpecColumn = columns.getColumnByName(Columns.module_spec.name());
        if (moduleSpecColumn != null && moduleSpecColumn.hasValue()) {
            String moduleSpecString = moduleSpecColumn.getStringValue();
            moduleSpec = getConfig().getModuleSpecSerializer().deserialize(moduleSpecString);
        }
    }
    return moduleSpec;
}
Example #17
Source File: HystrixCassandraGetRow.java From Nicobar with Apache License 2.0
@Override
protected ColumnList<String> run() throws Exception {
    RowQuery<RowKeyType, String> rowQuery = keyspace.prepareQuery(columnFamily).getKey(rowKey);
    /* apply column slice if we have one */
    if (columns != null) {
        rowQuery = rowQuery.withColumnSlice(columns);
    }
    ColumnList<String> result = rowQuery.execute().getResult();
    return result;
}
Example #18
Source File: AstyanaxEventReaderDAO.java From emodb with Apache License 2.0
/** Executes a {@code RowQuery} with {@code autoPaginate(true)} repeatedly as necessary to fetch all pages. */
private <K, C> Iterator<Column<C>> executePaginated(final RowQuery<K, C> query) {
    return Iterators.concat(new AbstractIterator<Iterator<Column<C>>>() {
        @Override
        protected Iterator<Column<C>> computeNext() {
            ColumnList<C> page = execute(query);
            return !page.isEmpty() ? page.iterator() : endOfData();
        }
    });
}
Example #19
Source File: AstyanaxStorageProvider.java From emodb with Apache License 2.0
private static Iterator<Map.Entry<String, StorageSummary>> decodeMetadataRows(
        final Iterator<Row<ByteBuffer, Composite>> rowIter, final AstyanaxTable table) {
    return new AbstractIterator<Map.Entry<String, StorageSummary>>() {
        @Override
        protected Map.Entry<String, StorageSummary> computeNext() {
            while (rowIter.hasNext()) {
                Row<ByteBuffer, Composite> row = rowIter.next();
                ByteBuffer key = row.getKey();
                ColumnList<Composite> columns = row.getColumns();

                String blobId = AstyanaxStorage.getContentKey(key);

                StorageSummary summary = toStorageSummary(columns);
                if (summary == null) {
                    continue;  // Partial blob, parts may still be replicating.
                }

                // TODO should be removed for blob s3 migration
                // Cleanup older versions of the blob, if any (unlikely).
                deleteDataColumns(table, blobId, columns, ConsistencyLevel.CL_ANY, summary.getTimestamp());

                return Maps.immutableEntry(blobId, summary);
            }
            return endOfData();
        }
    };
}
Example #20
Source File: AstyanaxQueueDAO.java From emodb with Apache License 2.0
@Nullable
@Override
public ByteBuffer findMinRecord(UUID dataId, @Nullable ByteBuffer from) {
    // Use a column range with a "start" to skip past tombstones.
    ColumnList<ByteBuffer> columns = execute(_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
            .getKey(dataId)
            .withColumnRange(new RangeBuilder()
                    .setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
                    .setLimit(1)
                    .build()));
    return !columns.isEmpty() ? columns.getColumnByIndex(0).getName() : null;
}
Example #21
Source File: AstyanaxQueueDAO.java From emodb with Apache License 2.0
/** Executes a {@code RowQuery} with {@code autoPaginate(true)} repeatedly as necessary to fetch all pages. */
private <K, C> Iterator<Column<C>> executePaginated(final RowQuery<K, C> query) {
    return Iterators.concat(new AbstractIterator<Iterator<Column<C>>>() {
        @Override
        protected Iterator<Column<C>> computeNext() {
            ColumnList<C> page = execute(query);
            return !page.isEmpty() ? page.iterator() : endOfData();
        }
    });
}
Example #22
Source File: QueryUtils.java From staash with Apache License 2.0
public static String formatQueryResult(CqlStatementResult rs, String cfname) {
    // TODO Auto-generated method stub
    String value = "";
    JsonObject response = new JsonObject();
    ColumnFamily<String, String> cf = ColumnFamily
            .newColumnFamily(cfname, StringSerializer.get(), StringSerializer.get());
    Rows<String, String> rows = rs.getRows(cf);
    int rcount = 1;
    for (com.netflix.astyanax.model.Row<String, String> row : rows) {
        ColumnList<String> columns = row.getColumns();
        Collection<String> colnames = columns.getColumnNames();
        String rowStr = "";
        String colStr = "";
        if (colnames.contains("key") && colnames.contains("column1")) {
            colStr = colStr + columns.getDateValue("column1", null).toGMTString();
            rowStr = rowStr + columns.getStringValue("value", null);
            response.putString(colStr, rowStr);
        } else {
            JsonObject rowObj = new JsonObject();
            for (String colName : colnames) {
                //colStr = colStr+colname+",";
                value = columns.getStringValue(colName, null);
                //rowStr=rowStr+value+",";
                rowObj.putString(colName, value);
            }
            //rowobj.putString("columns", colStr);
            //rowobj.putString("values", rowStr);
            response.putObject("" + rcount++, rowObj);
        }
    }
    return response.toString();
}
Example #23
Source File: AstyanaxBlockedDataReaderDAO.java From emodb with Apache License 2.0
private Record newRecord(Key key, ByteBuffer rowKey, ColumnList<DeltaKey> columns, int largeRowThreshold,
                         ReadConsistency consistency, @Nullable final Instant cutoffTime) {

    Iterator<Column<DeltaKey>> changeIter = getFilteredColumnIter(columns.iterator(), cutoffTime);
    Iterator<Column<DeltaKey>> compactionIter = getFilteredColumnIter(columns.iterator(), cutoffTime);
    Iterator<Column<DeltaKey>> rawMetadataIter = getFilteredColumnIter(columns.iterator(), cutoffTime);

    if (columns.size() >= largeRowThreshold) {
        // A large row such that the first query likely returned only a subset of all the columns.  Lazily fetch
        // the rest while ensuring we never load all columns into memory at the same time.  The current
        // Compactor+Resolver implementation must scan the row twice: once to find compaction records and once to
        // find deltas.  So we must call columnScan() twice, once for each.
        DeltaKey lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();

        AstyanaxTable table = (AstyanaxTable) key.getTable();
        AstyanaxStorage storage = table.getReadStorage();
        DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
        ColumnFamily<ByteBuffer, DeltaKey> columnFamily = placement.getBlockedDeltaColumnFamily();

        // Execute the same scan 3 times, returning 3 iterators that process the results in different ways.  In
        // practice at most two of the iterators are actually consumed (one or more is ignored) so the columnScan
        // should avoid actually doing any work until the first item is fetched from the iterator.
        changeIter = Iterators.concat(changeIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
        compactionIter = Iterators.concat(compactionIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
        rawMetadataIter = Iterators.concat(rawMetadataIter,
                getFilteredColumnIter(columnScan(rowKey, placement, columnFamily, lastColumn, null, false, _deltaKeyInc, Long.MAX_VALUE, 1, consistency), cutoffTime));
    }

    Iterator<Map.Entry<DeltaClusteringKey, Change>> deltaChangeIter =
            decodeChanges(new AstyanaxDeltaIterator(changeIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex((rowKey))));
    Iterator<Map.Entry<DeltaClusteringKey, Compaction>> deltaCompactionIter =
            decodeCompactions(new AstyanaxDeltaIterator(compactionIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex((rowKey))));
    Iterator<RecordEntryRawMetadata> deltaRawMetadataIter =
            rawMetadata(new AstyanaxDeltaIterator(rawMetadataIter, false, _deltaPrefixLength, ByteBufferUtil.bytesToHex((rowKey))));

    return new RecordImpl(key, deltaCompactionIter, deltaChangeIter, deltaRawMetadataIter);
}
Example #24
Source File: AstyanaxSupport.java From brooklyn-library with Apache License 2.0
/**
 * Read from a {@link CassandraNode} using the Astyanax API.
 * @throws ConnectionException
 */
public void readData(String keyspaceName) throws ConnectionException {
    // Create context
    AstyanaxContext<Keyspace> context = newAstyanaxContextForKeyspace(keyspaceName);
    try {
        Keyspace keyspace = context.getEntity();

        // Query data
        OperationResult<ColumnList<String>> query = keyspace.prepareQuery(sampleColumnFamily)
                .getKey("one")
                .execute();
        assertEquals(query.getHost().getHostName(), hostname);
        assertTrue(query.getLatency() > 0L);

        ColumnList<String> columns = query.getResult();
        assertEquals(columns.size(), 2);

        // Lookup columns in response by name
        String name = columns.getColumnByName("name").getStringValue();
        assertEquals(name, "Alice");

        // Iterate through the columns
        for (Column<String> c : columns) {
            assertTrue(ImmutableList.of("name", "company").contains(c.getName()));
        }
    } finally {
        context.shutdown();
    }
}
Example #25
Source File: AstyanaxMetaDaoImpl.java From staash with Apache License 2.0
public Map<String, JsonObject> runQuery(String key, String col) {
    OperationResult<CqlStatementResult> rs;
    Map<String, JsonObject> resultMap = new HashMap<String, JsonObject>();
    try {
        String queryStr = "";
        if (col != null && !col.equals("*")) {
            queryStr = "select column1, value from " + MetaConstants.META_KEY_SPACE + "."
                    + MetaConstants.META_COLUMN_FAMILY + " where key='" + key + "' and column1='" + col + "';";
        } else {
            queryStr = "select column1, value from " + MetaConstants.META_KEY_SPACE + "."
                    + MetaConstants.META_COLUMN_FAMILY + " where key='" + key + "';";
        }
        rs = keyspace.prepareCqlStatement().withCql(queryStr).execute();
        for (Row<String, String> row : rs.getResult().getRows(METACF)) {
            ColumnList<String> columns = row.getColumns();
            String key1 = columns.getStringValue("column1", null);
            String val1 = columns.getStringValue("value", null);
            resultMap.put(key1, new JsonObject(val1));
        }
    } catch (ConnectionException e) {
        e.printStackTrace();
        throw new RuntimeException(e.getMessage());
    }
    return resultMap;
}
Example #26
Source File: AstyanaxBlockedDataReaderDAO.java From emodb with Apache License 2.0
/**
 * Scans a single row for columns within the specified range, inclusive or exclusive on start based on whether
 * <code>page</code> is non-zero, and inclusive on end.
 */
private <C> Iterator<Column<C>> columnScan(final ByteBuffer rowKey,
                                           final DeltaPlacement placement,
                                           final ColumnFamily<ByteBuffer, C> columnFamily,
                                           final C start,
                                           final C end,
                                           final boolean reversed,
                                           final ColumnInc<C> columnInc,
                                           final long limit,
                                           final long page,
                                           final ReadConsistency consistency) {
    return Iterators.concat(new AbstractIterator<Iterator<Column<C>>>() {
        private C _from = start;
        private long _remaining = limit;
        private long _page = page;

        @Override
        protected Iterator<Column<C>> computeNext() {
            if (_remaining <= 0) {
                return endOfData();
            }

            // For page N+1, treat "_from" as exclusive.  Since Cassandra doesn't support exclusive column ranges
            // bump the from value up to the next possible time UUID (assumes from != null when page != 0).
            if (_page > 0) {
                if (_from.equals(end)) {
                    return endOfData();
                }
                _from = reversed ? columnInc.previous(_from) : columnInc.next(_from);
                if (_from == null) {
                    return endOfData();
                }
            }

            // Execute the query
            int batchSize = (int) Math.min(_remaining, MAX_COLUMN_SCAN_BATCH);
            ColumnList<C> columns = execute(placement.getKeyspace()
                            .prepareQuery(columnFamily, SorConsistencies.toAstyanax(consistency))
                            .getKey(rowKey)
                            .withColumnRange(_from, end, reversed, batchSize),
                    "scan columns in placement %s, column family %s, row %s, from %s to %s",
                    placement.getName(), columnFamily.getName(), rowKey, start, end);

            // Update state for the next iteration.
            if (columns.size() >= batchSize) {
                // Save the last column key so we can use it as the start (exclusive) if we must query to get more data.
                _from = columns.getColumnByIndex(columns.size() - 1).getName();
                _remaining = _remaining - columns.size();
                _page++;
            } else {
                // If we got fewer columns than we asked for, another query won't find more columns.
                _remaining = 0;
            }

            // Track metrics.  For rows w/more than 50 columns, count subsequent reads w/_largeRowReadMeter.
            (_page == 0 ? _randomReadMeter : _largeRowReadMeter).mark();

            return columns.iterator();
        }
    });
}
Example #27
Source File: MultiRowColumnIterator.java From usergrid with Apache License 2.0
/**
 * Multiple rows are present, merge them into a single result set
 * @param result
 * @return
 */
private List<T> mergeResults( final Rows<R, C> result, final int maxSize ) {

    if (logger.isTraceEnabled()) logger.trace( "Multiple rows have columns. Merging" );

    final List<T> mergedResults = new ArrayList<>(maxSize);

    for ( final R key : result.getKeys() ) {
        final ColumnList<C> columns = result.getRow( key ).getColumns();

        for ( final Column<C> column : columns ) {

            final T returnedValue = columnParser.parseColumn( column );

            // Use an O(log n) search, same as a tree, but with fast access to indexes for later operations
            int searchIndex = Collections.binarySearch( mergedResults, returnedValue, comparator );

            /**
             * DO NOT remove this section of code. If you're seeing inconsistent results during shard transition,
             * you'll need to enable this
             */
            //
            // if ( previous != null && comparator.compare( previous, returnedValue ) == 0 ) {
            //     throw new RuntimeException( String.format(
            //         "Cassandra returned 2 unique columns, but your comparator marked them as equal. This " +
            //         "indicates a bug in your comparator. Previous value was %s and current value is %s",
            //         previous, returnedValue ) );
            // }
            //
            // previous = returnedValue;

            // we've already seen it, no-op
            if ( searchIndex > -1 ) {
                continue;
            }

            final int insertIndex = ( searchIndex + 1 ) * -1;

            // it's at the end of the list, don't bother inserting just to remove it
            if ( insertIndex >= maxSize ) {
                continue;
            }

            if (logger.isTraceEnabled()) logger.trace( "Adding value {} to merged set at index {}", returnedValue, insertIndex );

            mergedResults.add( insertIndex, returnedValue );

            // prune the mergedResults
            while ( mergedResults.size() > maxSize ) {

                if (logger.isTraceEnabled()) logger.trace( "Trimming results to size {}", maxSize );

                // just remove from our tail until the size falls to the correct value
                mergedResults.remove( mergedResults.size() - 1 );
            }
        }

        if (logger.isTraceEnabled()) logger.trace( "Candidate result set size is {}", mergedResults.size() );
    }
    return mergedResults;
}
Example #28
Source File: AstyanaxEventReaderDAO.java From emodb with Apache License 2.0
/** Returns true to keep searching for more events, false to stop searching for events. */
private boolean readSlab(String channel, ByteBuffer slabId, SlabCursor cursor, boolean open, EventSink sink) {
    int start = cursor.get();
    if (start == SlabCursor.END) {
        return true;
    }

    boolean recent = isRecent(slabId);

    // Event add and delete write with local quorum, so read with local quorum to get a consistent view of things.
    // Using a lower consistency level could result in (a) duplicate events because we miss deletes and (b)
    // incorrectly closing or deleting slabs when slabs look empty if we miss adds.
    ColumnList<Integer> eventColumns = execute(
            _keyspace.prepareQuery(ColumnFamilies.SLAB, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getKey(slabId)
                    .withColumnRange(start, Constants.OPEN_SLAB_MARKER, false, Integer.MAX_VALUE));

    boolean searching = true;
    boolean empty = (start == 0);  // If we skipped events in the query we must assume the slab isn't empty.
    boolean more = false;
    int next = start;
    for (Column<Integer> eventColumn : eventColumns) {
        int eventIdx = eventColumn.getName();

        // Open slabs have a dummy entry at maxint that indicates that this slab is still open.
        if (eventIdx == Constants.OPEN_SLAB_MARKER) {
            break;
        }

        // Found at least one data item.
        empty = false;

        if (!searching) {
            more = true;  // There are more events to be found next time we poll this slab.
            break;
        }

        // Pass the data on to the EventSink.  It will tell us whether or not to keep searching.
        EventId eventId = AstyanaxEventId.create(channel, slabId, eventIdx);
        ByteBuffer eventData = eventColumn.getByteBufferValue();
        searching = sink.accept(eventId, eventData);
        next = eventIdx;
    }

    // Next time we query this slab start the search with last event received by the sink, repeating it.
    cursor.set(next);

    // Stale open slab?  Rare, should only happen when a writer crashes without cleaning up and closing its open
    // slabs.  Normally writers re-write the OPEN_SLAB_MARKER column on every write as a sort of heartbeat.  Readers
    // detect "stale" slabs when the open slab markers expire, and they close those slabs on behalf of the crashed writers.
    boolean hasOpenSlabMarker = !eventColumns.isEmpty() &&
            eventColumns.getColumnByIndex(eventColumns.size() - 1).getName() == Constants.OPEN_SLAB_MARKER;
    boolean stale = open && !recent && !hasOpenSlabMarker;
    if (stale) {
        _staleSlabMeter.mark();
    }

    // If the slab is currently closed or should be closed then it will never receive more data so check to see if
    // we can (a) delete it (it's empty) or at least (b) close it.
    if (empty && (!open || stale)) {
        deleteEmptySlabAsync(channel, slabId);
        open = false;
    } else if (stale) {
        closeStaleSlabAsync(channel, slabId);
        open = false;
    }

    // If we ran through all the data in a closed slab, skip this slab next time.  This is especially common with
    // badly-behaving Databus listeners that poll repeatedly but don't ack.
    if (!more && !open) {
        cursor.set(SlabCursor.END);
    }

    return searching;
}
Example #29
Source File: CassandraArchiveRepository.java From Nicobar with Apache License 2.0
/**
 * Get all of the {@link ScriptArchive}s for the given set of moduleIds. Will perform the operation in batches
 * as specified by {@link CassandraArchiveRepositoryConfig#getArchiveFetchBatchSize()} and outputs the jar files in
 * the path specified by {@link CassandraArchiveRepositoryConfig#getArchiveOutputDirectory()}.
 *
 * @param moduleIds keys to search for
 * @return set of ScriptArchives retrieved from the database
 */
@Override
public Set<ScriptArchive> getScriptArchives(Set<ModuleId> moduleIds) throws IOException {
    Set<ScriptArchive> archives = new LinkedHashSet<ScriptArchive>(moduleIds.size() * 2);
    Path archiveOuputDir = getConfig().getArchiveOutputDirectory();
    List<ModuleId> moduleIdList = new LinkedList<ModuleId>(moduleIds);
    int batchSize = getConfig().getArchiveFetchBatchSize();
    int start = 0;
    try {
        while (start < moduleIdList.size()) {
            int end = Math.min(moduleIdList.size(), start + batchSize);
            List<ModuleId> batchModuleIds = moduleIdList.subList(start, end);
            List<String> rowKeys = new ArrayList<String>(batchModuleIds.size());
            for (ModuleId batchModuleId : batchModuleIds) {
                rowKeys.add(batchModuleId.toString());
            }

            Rows<String, String> rows = cassandra.getRows(rowKeys.toArray(new String[0]));
            for (Row<String, String> row : rows) {
                String moduleId = row.getKey();
                ColumnList<String> columns = row.getColumns();
                Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
                Column<String> hashColumn = columns.getColumnByName(Columns.archive_content_hash.name());
                Column<String> contentColumn = columns.getColumnByName(Columns.archive_content.name());
                if (lastUpdateColumn == null || hashColumn == null || contentColumn == null) {
                    continue;
                }
                ScriptModuleSpec moduleSpec = getModuleSpec(columns);
                long lastUpdateTime = lastUpdateColumn.getLongValue();
                byte[] hash = hashColumn.getByteArrayValue();
                byte[] content = contentColumn.getByteArrayValue();

                // verify the hash
                if (hash != null && hash.length > 0 && !verifyHash(hash, content)) {
                    logger.warn("Content hash validation failed for moduleId {}. size: {}", moduleId, content.length);
                    continue;
                }

                String fileName = new StringBuilder().append(moduleId).append("-").append(lastUpdateTime).append(".jar").toString();
                Path jarFile = archiveOuputDir.resolve(fileName);
                Files.write(jarFile, content);
                JarScriptArchive scriptArchive = new JarScriptArchive.Builder(jarFile)
                        .setModuleSpec(moduleSpec)
                        .setCreateTime(lastUpdateTime)
                        .build();
                archives.add(scriptArchive);
            }
            start = end;
        }
    } catch (Exception e) {
        throw new IOException(e);
    }
    return archives;
}
Example #30
Source File: CassandraGatewayImpl.java From Nicobar with Apache License 2.0
@Override
public ColumnList<String> getRow(String rowKey) {
    return new HystrixCassandraGetRow<String>(keyspace, columnFamily, rowKey).execute();
}