Java Code Examples for com.datastax.driver.core.ResultSetFuture

The following examples show how to use com.datastax.driver.core.ResultSetFuture. They are extracted from open source projects; the source project, file, and license are noted above each example.
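
Before the project-specific examples, the following minimal sketch (not taken from any of the projects below) shows the two basic ways to consume a ResultSetFuture with the DataStax Java driver 3.x: block for the result with getUninterruptibly(), or stay asynchronous by registering a Guava FutureCallback. The contact point, query, and executor choice are illustrative assumptions.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;

public class ResultSetFutureSketch {

    public static void main(String[] args) {
        // Contact point and query are placeholder assumptions for this sketch.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect()) {

            // executeAsync() returns immediately; the query runs on the driver's I/O threads.
            ResultSetFuture future = session.executeAsync("SELECT release_version FROM system.local");

            // Option 1: block for the result (getUninterruptibly avoids the checked InterruptedException).
            Row row = future.getUninterruptibly().one();
            System.out.println("release_version = " + row.getString("release_version"));

            // Option 2: stay asynchronous by attaching a Guava callback to the future.
            ResultSetFuture future2 = session.executeAsync("SELECT release_version FROM system.local");
            Futures.addCallback(future2, new FutureCallback<ResultSet>() {
                @Override
                public void onSuccess(ResultSet rs) {
                    System.out.println("async release_version = " + rs.one().getString("release_version"));
                }

                @Override
                public void onFailure(Throwable t) {
                    t.printStackTrace();
                }
            }, MoreExecutors.directExecutor());
        }
    }
}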
Example 1
Source Project: arcusplatform   Source File: CassandraVideoV2Dao.java    License: Apache License 2.0
@Override
public ListenableFuture<?> delete(UUID placeId, UUID recordingId, boolean isFavorite, Date purgeTime, int purgePartitionId) {
	
	BatchStatement stmt = new BatchStatement(Type.UNLOGGED);

	addDeleteStatements(stmt, placeId, recordingId, isFavorite, purgeTime, purgePartitionId);
	// Add to the purge table if the recording is a favorite
	if(isFavorite) {
		VideoMetadata metadata = findByPlaceAndId(placeId, recordingId);
		metadata.setDeletionTime(purgeTime);
		metadata.setDeletionPartition(purgePartitionId);
		addStatementsForRemoveFromFavoriteTables(stmt, metadata);
	}

	long startTime = System.nanoTime();
	ResultSetFuture result = session.executeAsync(stmt);
	result.addListener(() -> DeleteTimer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS), MoreExecutors.directExecutor());
	return result;
}
 
Example 2
Source Project: titus-control-plane   Source File: StoreUtils.java    License: Apache License 2.0
/**
 * Executes a single Cassandra statement, wrapping it in an observable.
 */
static Observable<ResultSet> execute(Session session, Statement statement) {
    return Observable.create(emitter -> {
        ResultSetFuture resultSetFuture = session.executeAsync(statement);
        Futures.addCallback(resultSetFuture, new FutureCallback<ResultSet>() {
            @Override
            public void onSuccess(@Nullable ResultSet result) {
                emitter.onNext(result);
                emitter.onCompleted();
            }

            @Override
            public void onFailure(@Nonnull Throwable e) {
                emitter.onError(AgentStoreException.cassandraDriverError(e));
            }
        }, MoreExecutors.directExecutor());
    }, Emitter.BackpressureMode.NONE);
}
 
Example 3
Source Project: iotplatform   Source File: CassandraAbstractModelDao.java    License: Apache License 2.0
protected ListenableFuture<List<D>> findListByStatementAsync(Statement statement) {
    if (statement != null) {
        statement.setConsistencyLevel(cluster.getDefaultReadConsistencyLevel());
        ResultSetFuture resultSetFuture = getSession().executeAsync(statement);
        return Futures.transform(resultSetFuture, new Function<ResultSet, List<D>>() {
            @Nullable
            @Override
            public List<D> apply(@Nullable ResultSet resultSet) {
                Result<E> result = getMapper().map(resultSet);
                if (result != null) {
                    List<E> entities = result.all();
                    return DaoUtil.convertDataList(entities);
                } else {
                    return Collections.emptyList();
                }
            }
        });
    }
    return Futures.immediateFuture(Collections.emptyList());
}
 
Example 4
Source Project: iotplatform   Source File: CassandraBaseTimeseriesDao.java    License: Apache License 2.0
private ListenableFuture<List<TsKvEntry>> findAllAsyncWithLimit(EntityId entityId, TsKvQuery query) {
    long minPartition = toPartitionTs(query.getStartTs());
    long maxPartition = toPartitionTs(query.getEndTs());

    ResultSetFuture partitionsFuture = fetchPartitions(entityId, query.getKey(), minPartition, maxPartition);

    final SimpleListenableFuture<List<TsKvEntry>> resultFuture = new SimpleListenableFuture<>();
    final ListenableFuture<List<Long>> partitionsListFuture = Futures.transform(partitionsFuture, getPartitionsArrayFunction(), readResultsProcessingExecutor);

    Futures.addCallback(partitionsListFuture, new FutureCallback<List<Long>>() {
        @Override
        public void onSuccess(@Nullable List<Long> partitions) {
            TsKvQueryCursor cursor = new TsKvQueryCursor(entityId.getEntityType().name(), entityId.getId(), query, partitions);
            findAllAsyncSequentiallyWithLimit(cursor, resultFuture);
        }

        @Override
        public void onFailure(Throwable t) {
            log.error("[{}][{}] Failed to fetch partitions for interval {}-{}", entityId.getEntityType().name(), entityId.getId(), minPartition, maxPartition, t);
        }
    }, readResultsProcessingExecutor);

    return resultFuture;
}
 
Example 5
Source Project: iotplatform   Source File: CassandraBaseTimeseriesDao.java    License: Apache License 2.0
private ListenableFuture<Optional<TsKvEntry>> findAndAggregateAsync(EntityId entityId, TsKvQuery query, long minPartition, long maxPartition) {
    final Aggregation aggregation = query.getAggregation();
    final String key = query.getKey();
    final long startTs = query.getStartTs();
    final long endTs = query.getEndTs();
    final long ts = startTs + (endTs - startTs) / 2;

    ResultSetFuture partitionsFuture = fetchPartitions(entityId, key, minPartition, maxPartition);

    ListenableFuture<List<Long>> partitionsListFuture = Futures.transform(partitionsFuture, getPartitionsArrayFunction(), readResultsProcessingExecutor);

    ListenableFuture<List<ResultSet>> aggregationChunks = Futures.transform(partitionsListFuture,
            getFetchChunksAsyncFunction(entityId, key, aggregation, startTs, endTs), readResultsProcessingExecutor);

    return Futures.transform(aggregationChunks, new AggregatePartitionsFunction(aggregation, key, ts), readResultsProcessingExecutor);
}
 
Example 6
Source Project: emodb   Source File: AdaptiveResultSet.java    License: Apache License 2.0
private static ListenableFuture<ResultSet> executeAdaptiveQueryAsync(Session session, Statement statement, int fetchSize,
                                                                     int remainingAdaptations) {

    statement.setFetchSize(fetchSize);

    ResultSetFuture rawFuture = session.executeAsync(statement);

    // Lazily wrap the result set from the async result with an AdaptiveResultSet
    ListenableFuture<ResultSet> adaptiveFuture = Futures.transform(rawFuture, new Function<ResultSet, ResultSet>() {
        @Override
        public ResultSet apply(ResultSet resultSet) {
            return new AdaptiveResultSet(session, resultSet, remainingAdaptations);
        }
    });

    return Futures.withFallback(adaptiveFuture, t -> {
        if (isAdaptiveException(t) && remainingAdaptations > 0 && fetchSize > MIN_FETCH_SIZE) {
            // Try again with half the fetch size
            int reducedFetchSize = Math.max(fetchSize / 2, MIN_FETCH_SIZE);
            _log.debug("Repeating previous query with fetch size {} due to {}", reducedFetchSize, t.getMessage());
            return executeAdaptiveQueryAsync(session, statement, reducedFetchSize, remainingAdaptations - 1);
        }
        throw Throwables.propagate(t);
    });
}
 
Example 7
Source Project: thorntail   Source File: StatefulTestBean.java    License: Apache License 2.0
public Row asyncQuery() {
    openConnection();
    try {
        session.execute("CREATE TABLE employee (lastname varchar primary key, firstname varchar, age int, city varchar, email varchar)");
        session.execute("INSERT INTO employee (lastname, firstname, age, city, email) VALUES ('Smith','Leanne', 30, 'Boston', '[email protected]')");
        session.execute("update employee set age = 36 where lastname = 'Smith'");
        // Select and show the change
        try {
            ResultSetFuture results = session.executeAsync("select * from employee where lastname='Smith'");
            return results.get().one();
        } catch (Throwable exception) {
            throw new RuntimeException("could not get executeAsync result for some reason", exception);
        }
    } finally {
        try {
            session.execute("DROP TABLE employee");
        } catch (Throwable ignore) {
        }

        closeConnection();
    }
}
 
Example 8
Source Project: cassandra-reaper   Source File: CassandraStorage.java    License: Apache License 2.0
@Override
public Collection<RepairRun> getRepairRunsForCluster(String clusterName, Optional<Integer> limit) {
  List<ResultSetFuture> repairRunFutures = Lists.<ResultSetFuture>newArrayList();

  // Grab all ids for the given cluster name
  Collection<UUID> repairRunIds = getRepairRunIdsForCluster(clusterName);
  // Grab repair runs asynchronously for all the ids returned by the index table
  for (UUID repairRunId : repairRunIds) {
    repairRunFutures.add(session.executeAsync(getRepairRunPrepStmt.bind(repairRunId)));
    if (repairRunFutures.size() == limit.orElse(1000)) {
      break;
    }
  }

  return getRepairRunsAsync(repairRunFutures);
}
 
Example 9
Source Project: cassandra-reaper   Source File: CassandraStorage.java    License: Apache License 2.0
private Collection<? extends RepairRun> getRepairRunsWithStateForCluster(
    Collection<UUID> clusterRepairRunsId,
    RunState runState) {

  Collection<RepairRun> repairRuns = Sets.newHashSet();
  List<ResultSetFuture> futures = Lists.newArrayList();

  for (UUID repairRunId : clusterRepairRunsId) {
    futures.add(session.executeAsync(getRepairRunPrepStmt.bind(repairRunId)));
  }

  for (ResultSetFuture future : futures) {
    ResultSet repairRunResult = future.getUninterruptibly();
    for (Row row : repairRunResult) {
      repairRuns.add(buildRepairRunFromRow(row, row.getUUID("id")));
    }
  }

  return repairRuns.stream().filter(repairRun -> repairRun.getRunState() == runState).collect(Collectors.toSet());
}
 
Example 10
private void execute(BoundStatement q, JetstreamEvent event) {
    try {
        ResultSetFuture future = cassandraSession.executeAsync(q);
        CallBackListener listener = new CallBackListener(future, event);
        future.addListener(listener, pool);
        pendingRequestCounter.incrementAndGet();
    } catch (Throwable ex) {
        LOGGER.error(
                "Error publishing metrics in MetricCassandraCollector:"
                        + ex.getMessage());
        cassandraErrorCount.increment();
        if (event.get(JetstreamReservedKeys.MessageAffinityKey.toString()) == null) {
            event.put(JetstreamReservedKeys.MessageAffinityKey.toString(),
                    (String) event.get(MCConstant.METRIC_NAME));
        }
        getAdviceListener().retry(event, RetryEventCode.MSG_RETRY,
                ex.getMessage());
        eventSentToAdviceListener.increment();
        registerError(ex);
    }
}
 
Example 11
private void runBatchInsert(List<Insert> insertRequest) {
    try {
        Batch batch;
        if (config.getLoggedBatch()) {
            batch = QueryBuilder.batch(insertRequest
                    .toArray(new RegularStatement[insertRequest.size()]));
        } else {
            batch = QueryBuilder.unloggedBatch(insertRequest
                    .toArray(new RegularStatement[insertRequest.size()]));
        }
        totalCassandraInsertRequest.addAndGet(insertRequest.size());
        ResultSetFuture future = cassandraSession.executeAsync(batch);
        CallBackListener listener = new CallBackListener(future, null);
        future.addListener(listener, pool);
        incrementBatchInsertCounter();
        pendingRequestCounter.incrementAndGet();
    } catch (Throwable ex) {
        LOGGER.error("Error publising metrics in MetricCassandraCollector:" + ex.getMessage());
        cassandraErrorCount.increment();
        registerError(ex);
    } finally {
        insertRequest.clear();
    }
}
 
Example 12
Source Project: datacollector   Source File: CassandraTarget.java    License: Apache License 2.0
/**
 * Submits un-batched records asynchronously for improved performance.
 */
public void writeUnbatchedInsert(Batch batch) throws StageException {
  Iterator<Record> records = batch.getRecords();
  Map<BoundStatement, Record> statementsToExecute = new HashMap<>();
  Map<ResultSetFuture, Record> tasks = new HashMap<>();

  while (records.hasNext()) {
    final Record record = records.next();
    BoundStatement boundStatement = recordToBoundStatement(record);
    if (boundStatement != null) {
      statementsToExecute.put(boundStatement, record);
    }
  }

  for (BoundStatement statement : statementsToExecute.keySet()) {
    tasks.put(session.executeAsync(statement), statementsToExecute.get(statement));
    if (tasks.size() == getMaxConnections()) {
      getTaskResult(tasks);
    }
  }

  if (tasks.size() > 0) {
    getTaskResult(tasks);
  }
}
 
Example 13
Source Project: datacollector   Source File: CassandraTarget.java    License: Apache License 2.0
private void getTaskResult(Map<ResultSetFuture, Record> tasks) {
  for (ResultSetFuture task : tasks.keySet()) {
    try {
      task.getUninterruptibly(conf.writeTimeout, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
      LOG.debug(Errors.CASSANDRA_11.getMessage(), conf.writeTimeout, e);
      Record errorRecord = tasks.get(task);
      errorRecordHandler.onError(
          new OnRecordErrorException(
              errorRecord,
              Errors.CASSANDRA_11,
              conf.writeTimeout,
              e.toString(),
              e
          )
      );
    }
  }
  tasks.clear();
}
 
Example 14
Source Project: Flink-CEPplus   Source File: CassandraOutputFormatBase.java    License: Apache License 2.0
@Override
public void writeRecord(OUT record) throws IOException {
	if (exception != null) {
		throw new IOException("write record failed", exception);
	}

	Object[] fields = extractFields(record);
	ResultSetFuture result = session.executeAsync(prepared.bind(fields));
	Futures.addCallback(result, callback);
}
 
Example 15
Source Project: flink   Source File: CassandraOutputFormatBase.java    License: Apache License 2.0
@Override
public void writeRecord(OUT record) throws IOException {
	if (exception != null) {
		throw new IOException("write record failed", exception);
	}

	Object[] fields = extractFields(record);
	ResultSetFuture result = session.executeAsync(prepared.bind(fields));
	Futures.addCallback(result, callback);
}
 
Example 16
Source Project: arcusplatform   Source File: VideoV2Util.java    License: Apache License 2.0
static ResultSetFuture executeAsyncAndUpdateTimer(Session session, Statement stmt, Timer timer) {
	long startTime = System.nanoTime();
	ResultSetFuture rs = session.executeAsync(stmt);
	if (timer != null) {
		rs.addListener(() -> timer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS), MoreExecutors.directExecutor());
	}
	return rs;
}
 
Example 17
Source Project: localization_nifi   Source File: QueryCassandraTest.java    License: Apache License 2.0
@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
                                String username, String password) {
    Cluster mockCluster = mock(Cluster.class);
    try {
        Metadata mockMetadata = mock(Metadata.class);
        when(mockMetadata.getClusterName()).thenReturn("cluster1");
        when(mockCluster.getMetadata()).thenReturn(mockMetadata);
        Session mockSession = mock(Session.class);
        when(mockCluster.connect()).thenReturn(mockSession);
        when(mockCluster.connect(anyString())).thenReturn(mockSession);
        Configuration config = Configuration.builder().build();
        when(mockCluster.getConfiguration()).thenReturn(config);
        ResultSetFuture future = mock(ResultSetFuture.class);
        ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
        when(future.getUninterruptibly()).thenReturn(rs);
        try {
            doReturn(rs).when(future).getUninterruptibly(anyLong(), any(TimeUnit.class));
        } catch (TimeoutException te) {
            throw new IllegalArgumentException("Mocked cluster doesn't time out");
        }
        if (exceptionToThrow != null) {
            when(mockSession.executeAsync(anyString())).thenThrow(exceptionToThrow);
        } else {
            when(mockSession.executeAsync(anyString())).thenReturn(future);
        }
    } catch (Exception e) {
        fail(e.getMessage());
    }
    return mockCluster;
}
 
Example 18
Source Project: nifi   Source File: PutCassandraRecordTest.java    License: Apache License 2.0
@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
                                String username, String password, String compressionType) {
    Cluster mockCluster = mock(Cluster.class);
    try {
        Metadata mockMetadata = mock(Metadata.class);
        when(mockMetadata.getClusterName()).thenReturn("cluster1");
        when(mockCluster.getMetadata()).thenReturn(mockMetadata);
        when(mockCluster.connect()).thenReturn(mockSession);
        when(mockCluster.connect(anyString())).thenReturn(mockSession);
        Configuration config = Configuration.builder().build();
        when(mockCluster.getConfiguration()).thenReturn(config);
        ResultSetFuture future = mock(ResultSetFuture.class);
        ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
        PreparedStatement ps = mock(PreparedStatement.class);
        when(mockSession.prepare(anyString())).thenReturn(ps);
        BoundStatement bs = mock(BoundStatement.class);
        when(ps.bind()).thenReturn(bs);
        when(future.getUninterruptibly()).thenReturn(rs);
        try {
            doReturn(rs).when(future).getUninterruptibly(anyLong(), any(TimeUnit.class));
        } catch (TimeoutException te) {
            throw new IllegalArgumentException("Mocked cluster doesn't time out");
        }
        if (exceptionToThrow != null) {
            doThrow(exceptionToThrow).when(mockSession).executeAsync(anyString());
            doThrow(exceptionToThrow).when(mockSession).executeAsync(any(Statement.class));

        } else {
            when(mockSession.executeAsync(anyString())).thenReturn(future);
            when(mockSession.executeAsync(any(Statement.class))).thenReturn(future);
        }
        when(mockSession.getCluster()).thenReturn(mockCluster);
    } catch (Exception e) {
        fail(e.getMessage());
    }
    return mockCluster;
}
 
Example 19
Source Project: sunbird-lms-service   Source File: CassandraDACImpl.java    License: MIT License
public void applyOperationOnRecordsAsync(
    String keySpace,
    String table,
    Map<String, Object> filters,
    List<String> fields,
    FutureCallback<ResultSet> callback) {
  Session session = connectionManager.getSession(keySpace);
  try {
    Select select;
    if (CollectionUtils.isNotEmpty(fields)) {
      select = QueryBuilder.select(fields.toArray(new String[0])).from(keySpace, table); // typed toArray avoids the ClassCastException of casting Object[]
    } else {
      select = QueryBuilder.select().all().from(keySpace, table);
    }

    if (MapUtils.isNotEmpty(filters)) {
      Select.Where where = select.where();
      for (Map.Entry<String, Object> filter : filters.entrySet()) {
        Object value = filter.getValue();
        if (value instanceof List) {
          where = where.and(QueryBuilder.in(filter.getKey(), ((List) filter.getValue())));
        } else {
          where = where.and(QueryBuilder.eq(filter.getKey(), filter.getValue()));
        }
      }
    }
    ResultSetFuture future = session.executeAsync(select);
    Futures.addCallback(future, callback, Executors.newFixedThreadPool(1));
  } catch (Exception e) {
    ProjectLogger.log(Constants.EXCEPTION_MSG_FETCH + table + " : " + e.getMessage(), e);
    throw new ProjectCommonException(
        ResponseCode.SERVER_ERROR.getErrorCode(),
        ResponseCode.SERVER_ERROR.getErrorMessage(),
        ResponseCode.SERVER_ERROR.getResponseCode());
  }
}
 
Example 20
Source Project: geowave   Source File: BatchedWrite.java    License: Apache License 2.0
private void executeAsync(final Statement statement) throws InterruptedException {
  writeSemaphore.acquire();
  final ResultSetFuture future = session.executeAsync(statement);
  Futures.addCallback(
      future,
      new IngestCallback(writeSemaphore),
      CassandraOperations.WRITE_RESPONSE_THREADS);
}
 
Example 21
Source Project: blueflood   Source File: DAbstractMetricsRW.java    License: Apache License 2.0
/**
 * Converts the lists of {@link com.datastax.driver.core.ResultSetFuture} gathered for each
 * {@link com.rackspacecloud.blueflood.types.Locator} into a
 * {@link com.rackspacecloud.blueflood.outputs.formats.MetricData} object per locator.
 *
 * @param resultSets   result set futures, keyed by locator
 * @param locatorIO    the {@code DAbstractMetricIO} used to read each locator's results
 * @param columnFamily the column family that was queried
 * @param range        the time range of the query
 * @return a map of each locator to its corresponding {@code MetricData}
 */
protected Map<Locator, MetricData> resultSetsToMetricData(Map<Locator, List<ResultSetFuture>> resultSets,
                                                          Map<Locator, DAbstractMetricIO> locatorIO,
                                                          String columnFamily,
                                                          Range range) {

    MetadataCache metadataCache = MetadataCache.getInstance();

    // iterate through all ResultSetFuture
    Map<Locator, MetricData> locatorMetricDataMap = new HashMap<Locator, MetricData>();
    for (Map.Entry<Locator, List<ResultSetFuture>> entry : resultSets.entrySet() ) {
        Locator locator = entry.getKey();
        List<ResultSetFuture> futures = entry.getValue();

        DAbstractMetricIO io = locatorIO.get(locator);

        // convert the ResultSets into a Table of locator, timestamp, rollup
        Table<Locator, Long, Object> locatorTimestampRollup = io.toLocatorTimestampValue(futures, locator, columnFamily, range);

        Map<Long, Object> tsRollupMap = locatorTimestampRollup.row( locator );

        // convert to Points and MetricData
        Points points = convertToPoints( tsRollupMap );

        // create MetricData
        MetricData metricData = new MetricData( points, metadataCache.getUnitString( locator ) );
        locatorMetricDataMap.put( locator, metricData );
    }
    return locatorMetricDataMap;
}
 
Example 22
Source Project: hawkular-metrics   Source File: RxSessionImpl.java    License: Apache License 2.0
private Observable<ResultSet> scheduleStatement(Statement st, Scheduler scheduler) {
    while (true) {
        if (availableInFlightSlots(st)) {
            ResultSetFuture future = session.executeAsync(st);
            return ListenableFutureObservable.from(future, scheduler);
        } else {
            try {
                Thread.sleep(0, 1);
            } catch (InterruptedException e) {
                // interruption is ignored; keep waiting for a free in-flight slot
            }
        }
    }
}
 
Example 23
Source Project: iotplatform   Source File: BaseRelationDao.java    License: Apache License 2.0
@Override
public ListenableFuture<Boolean> deleteRelation(EntityId from, EntityId to, String relationType, RelationTypeGroup typeGroup) {
    BoundStatement stmt = getDeleteStmt().bind()
            .setUUID(0, from.getId())
            .setString(1, from.getEntityType().name())
            .setUUID(2, to.getId())
            .setString(3, to.getEntityType().name())
            .set(4, typeGroup, relationTypeGroupCodec)
            .setString(5, relationType);
    ResultSetFuture future = executeAsyncWrite(stmt);
    return getBooleanListenableFuture(future);
}
 
Example 24
Source Project: iotplatform   Source File: BaseRelationDao.java    License: Apache License 2.0
@Override
public ListenableFuture<Boolean> deleteOutboundRelations(EntityId entity) {
    BoundStatement stmt = getDeleteAllByEntityStmt().bind()
            .setUUID(0, entity.getId())
            .setString(1, entity.getEntityType().name());
    ResultSetFuture future = executeAsyncWrite(stmt);
    return getBooleanListenableFuture(future);
}
 
Example 25
Source Project: newts   Source File: CassandraSearcher.java    License: Apache License 2.0
private Collection<String> getMetricNamesFromResults(ResultSetFuture results) throws InterruptedException, ExecutionException {
    List<String> metricNames = Lists.newArrayList();

    for (Row row : results.get()) {
        metricNames.add(row.getString(Schema.C_METRICS_NAME));
    }

    return metricNames;
}
 
Example 26
@Override
public @NotNull CompletableFuture<ResponseInfo<String>> execute(
    @NotNull RequestInfo<Void> request,
    @NotNull Executor longRunningTaskExecutor,
    @NotNull ChannelHandlerContext ctx
) {
    Session session = EmbeddedCassandraUtils.cassandraSession(disableCassandra);
    if (session == null) {
        ApiError apiErrorToThrow = (disableCassandra)
                                   ? EXAMPLE_EMBEDDED_CASSANDRA_DISABLED
                                   : SampleCoreApiError.GENERIC_SERVICE_ERROR;

        throw ApiException.newBuilder()
                          .withApiErrors(apiErrorToThrow)
                          .withExceptionMessage("Unable to get cassandra session.")
                          .build();
    }

    ResultSetFuture cassandraResultFuture = session.executeAsync(basicCassandraQuery);

    // Convert the cassandra result future to a CompletableFuture, then add a listener that turns the result of the
    //      Cassandra call into the ResponseInfo<String> we need to return. Note that we're not doing
    //      thenApplyAsync() because the work done to translate the Cassandra result to our ResponseInfo object is
    //      trivial and doesn't need its own thread. If you had more complex logic that was time-consuming (or more
    //      blocking calls) you would want to do the extra work with CompletableFuture.*Async() calls.
    return FutureConverter
        .toCompletableFuture(cassandraResultFuture)
        .thenApply(functionWithTracingAndMdc(this::buildResponseFromCassandraQueryResult, ctx));
}
 
Example 27
Source Project: cassandra-reaper   Source File: CassandraStorage.java    License: Apache License 2.0
@Override
public Collection<RepairRun> getRepairRunsForUnit(UUID repairUnitId) {
  List<ResultSetFuture> repairRunFutures = Lists.<ResultSetFuture>newArrayList();

  // Grab all run ids for the given repair unit
  ResultSet repairRunIds = session.execute(getRepairRunForUnitPrepStmt.bind(repairUnitId));

  // Grab repair runs asynchronously for all the ids returned by the index table
  for (Row repairRunId : repairRunIds) {
    repairRunFutures.add(session.executeAsync(getRepairRunPrepStmt.bind(repairRunId.getUUID("id"))));
  }

  return getRepairRunsAsync(repairRunFutures);
}
 
Example 28
Source Project: copper-engine   Source File: CassandraStorage.java    License: Apache License 2.0
@Override
public ListenableFuture<Void> deleteWorkflowInstance(String wfId) throws Exception {
    logger.debug("deleteWorkflowInstance({})", wfId);
    session.executeAsync(preparedStatements.get(CQL_DEL_WFI_ID).bind(wfId));
    final PreparedStatement pstmt = preparedStatements.get(CQL_DEL_WORKFLOW_INSTANCE_WAITING);
    final long startTS = System.nanoTime();
    final ResultSetFuture rsf = session.executeAsync(pstmt.bind(wfId));
    return createSettableFuture(rsf, "wfi.delete", startTS);
}
 
Example 29
Source Project: skywalking   Source File: CaseController.java    License: Apache License 2.0
private void executeAsync(Session session) {
    logger.info("execute in async");

    ResultSetFuture createKeyspaceDataResultSetFuture = session.executeAsync(CREATE_KEYSPACE_SQL);
    ResultSet createKeyspaceDataResultSet = createKeyspaceDataResultSetFuture.getUninterruptibly();
    logger.info("CREATE KEYSPACE result: " + createKeyspaceDataResultSet.toString());

    ResultSetFuture createTableDataResultSetFuture = session.executeAsync(CREATE_TABLE_SQL);
    ResultSet createTableDataResultSet = createTableDataResultSetFuture.getUninterruptibly();
    logger.info("CREATE TABLE result: " + createTableDataResultSet.toString());

    PreparedStatement insertDataPreparedStatement = session.prepare(INSERT_DATA_SQL);
    ResultSetFuture insertDataResultSetFuture = session.executeAsync(insertDataPreparedStatement.bind("101", "foobar"));
    ResultSet insertDataResultSet = insertDataResultSetFuture.getUninterruptibly();
    logger.info("INSERT result: " + insertDataResultSet.toString());

    PreparedStatement selectDataPreparedStatement = session.prepare(SELECT_DATA_SQL);
    ResultSetFuture resultSetFuture = session.executeAsync(selectDataPreparedStatement.bind("101"));
    ResultSet resultSet = resultSetFuture.getUninterruptibly();
    Row row = resultSet.one();
    logger.info("SELECT result: id: {}, value: {}", row.getString("id"), row.getString("value"));

    PreparedStatement deleteDataPreparedStatement = session.prepare(DELETE_DATA_SQL);
    ResultSetFuture deleteDataResultSetFuture = session.executeAsync(deleteDataPreparedStatement.bind("101"));
    ResultSet deleteDataResultSet = deleteDataResultSetFuture.getUninterruptibly();
    logger.info("DELETE result: " + deleteDataResultSet.toString());

    ResultSetFuture dropTableDataResultSetFuture = session.executeAsync(DROP_TABLE_SQL);
    ResultSet dropTableDataResultSet = dropTableDataResultSetFuture.getUninterruptibly();
    logger.info("DROP TABLE result: " + dropTableDataResultSet.toString());

    ResultSetFuture dropKeyspaceDataResultSetFuture = session.executeAsync(DROP_KEYSPACE);
    ResultSet dropKeyspaceDataResultSet = dropKeyspaceDataResultSetFuture.getUninterruptibly();
    logger.info("DROP KEYSPACE result: " + dropKeyspaceDataResultSet.toString());
}
 
Example 30
Source Project: glowroot   Source File: CassandraAsyncIT.java    License: Apache License 2.0
@Override
public void transactionMarker() throws Exception {
    List<ResultSetFuture> futures = Lists.newArrayList();
    for (int i = 0; i < 100; i++) {
        futures.add(session.executeAsync("SELECT * FROM test.users"));
    }
    for (ResultSetFuture future : futures) {
        ResultSet results = future.get();
        for (Row row : results) {
            row.getInt("id");
        }
    }
}