Java Code Examples for com.datastax.driver.core.BatchStatement#add()
The following examples show how to use com.datastax.driver.core.BatchStatement#add().
They are drawn from open source projects; the source file, project, and license are noted above each example.
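Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: individual statements are accumulated with add() and the whole batch is sent to Cassandra as a single request via Session#execute(). The contact point, keyspace, and table names (my_keyspace, users) are hypothetical placeholders for illustration, not taken from the projects below.

import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

public class BatchStatementExample {
    public static void main(String[] args) {
        // Hypothetical contact point and keyspace; adjust for your cluster.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect("my_keyspace")) {

            // UNLOGGED skips the batch log, trading atomicity for throughput;
            // the no-argument constructor creates a LOGGED batch.
            BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);

            // add() accepts any Statement: a SimpleStatement with bind values...
            batch.add(new SimpleStatement(
                    "INSERT INTO users (id, name) VALUES (?, ?)", 1, "alice"));

            // ...or a BoundStatement created from a PreparedStatement.
            PreparedStatement prepared =
                    session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");
            batch.add(prepared.bind(2, "bob"));

            // The entire batch travels to the coordinator in one request.
            session.execute(batch);
        }
    }
}

As the examples show, the same pattern scales from a two-statement upsert to bulk loads; the main design choice is LOGGED (atomic, via the batch log) versus UNLOGGED (faster, with no atomicity guarantee across partitions).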
Example 1
Source File: AuthorizationGrantDAOImpl.java From arcusplatform with Apache License 2.0
@Override
public void save(AuthorizationGrant grant) {
    Preconditions.checkNotNull(grant, "grant must not be null");
    Preconditions.checkNotNull(grant.getEntityId(), "entity id must not be null");
    Preconditions.checkNotNull(grant.getAccountId(), "account id must not be null");
    Preconditions.checkNotNull(grant.getPlaceId(), "place id must not be null");

    // uses upsert semantics where an insert statement will update the existing row if it already exists
    BatchStatement batch = new BatchStatement();
    batch.add(bindUpsert(upsert, grant));
    batch.add(bindUpsert(upsertByPlace, grant));

    try (Context ctxt = upsertTimer.time()) {
        this.session.execute(batch);
    }
}
Example 2
Source File: BaseCassandraCRUDDao.java From arcusplatform with Apache License 2.0
@Override
public void delete(T entity) {
    if (entity == null || entity.getId() == null) {
        return;
    }
    Statement statement = new BoundStatement(delete).bind(entity.getId());
    List<Statement> indexDeletes = prepareIndexDeletes(entity);
    if (!indexDeletes.isEmpty()) {
        BatchStatement batch = new BatchStatement();
        batch.add(statement);
        addToBatch(batch, indexDeletes);
        statement = batch;
    }
    try (Context ctxt = deleteTimer.time()) {
        session.execute(statement);
    }
}
Example 3
Source File: DeepCqlRecordWriter.java From deep-spark with Apache License 2.0
/**
 * Executes cql batch statements in Cassandra
 */
@Override
public void run() {
    LOG.debug("[" + this + "] Executing batch write to cassandra");
    try {
        final PreparedStatement preparedStatement = sessionWithHost.prepare(cql);
        final BatchStatement batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED);
        for (final List<Object> record : records) {
            batchStatement.add(preparedStatement.bind(record.toArray(new Object[record.size()])));
        }
        sessionWithHost.execute(batchStatement);
    } catch (Exception e) {
        LOG.error("[" + this + "] Exception occurred while trying to execute batch in cassandra: "
                + e.getMessage());
    }
}
Example 4
Source File: Cassandra2xDefaultMapDAO.java From cumulusrdf with Apache License 2.0
@SuppressWarnings("unchecked") @Override public void delete(final K... keys) { if (keys == null || keys.length == 0) { return; } BatchStatement batchStatement = new BatchStatement(); for (K key : keys) { if (key != null) { ByteBuffer serializedKey = _keySerializer.serialize(key); BoundStatement deleteStatement = _deleteStatement.bind(serializedKey); batchStatement.add(deleteStatement); } } _session.execute(batchStatement); }
Example 5
Source File: CassandraEventData.java From yb-sample-apps with Apache License 2.0
@Override
public long doWrite(int threadIdx) {
    // Pick a random data source.
    DataSource dataSource = dataSources.get(random.nextInt(dataSources.size()));
    long numKeysWritten = 0;
    BatchStatement batch = new BatchStatement();
    // Enter a batch of data points.
    long ts = dataSource.getDataEmitTs();
    for (int i = 0; i < appConfig.batchSize; i++) {
        batch.add(getPreparedInsert().bind()
                .setString("device_id", dataSource.getDeviceId())
                .setLong("ts", ts)
                .setString("event_type", dataSource.getEventType())
                .setBytesUnsafe("value", getValue(dataSource.getDeviceId())));
        numKeysWritten++;
        ts++;
    }
    dataSource.setLastEmittedTs(ts);
    getCassandraClient().execute(batch);
    return numKeysWritten;
}
Example 6
Source File: InvitationDAOImpl.java From arcusplatform with Apache License 2.0
@Override
public void cancel(Invitation invitation) {
    Preconditions.checkNotNull(invitation, "invitation is required");
    try (Context timer = cancelTimer.time()) {
        BatchStatement stmt = new BatchStatement();
        BoundStatement tblDel = new BoundStatement(delete);
        tblDel.setString(Column.code.name(), invitation.getCode());
        stmt.add(tblDel);
        BoundStatement placeIdxDel = new BoundStatement(deletePlaceIdx);
        placeIdxDel.setString(Column.code.name(), invitation.getCode());
        placeIdxDel.setUUID(Column.placeId.name(), UUID.fromString(invitation.getPlaceId()));
        stmt.add(placeIdxDel);
        if (invitation.getInviteeId() != null) {
            BoundStatement personIdxDel = new BoundStatement(deletePersonIdx);
            personIdxDel.setString(Column.code.name(), invitation.getCode());
            personIdxDel.setUUID(Column.inviteeId.name(), UUID.fromString(invitation.getInviteeId()));
            stmt.add(personIdxDel);
        }
        session.execute(stmt);
    }
}
Example 7
Source File: DAbstractMetricsRW.java From blueflood with Apache License 2.0
/**
 * This method inserts a collection of {@link com.rackspacecloud.blueflood.service.SingleRollupWriteContext} objects
 * to the appropriate Cassandra column family.
 *
 * It performs the inserts by executing an UNLOGGED BATCH statement.
 *
 * @param writeContexts
 *
 * @throws IOException
 */
@Override
public void insertRollups(List<SingleRollupWriteContext> writeContexts) {

    if (writeContexts.size() == 0) {
        return;
    }

    Timer.Context ctx = Instrumentation.getWriteTimerContext(
            writeContexts.get(0).getDestinationCF().getName());
    try {
        BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);

        for (SingleRollupWriteContext writeContext : writeContexts) {
            Rollup rollup = writeContext.getRollup();
            Locator locator = writeContext.getLocator();
            Granularity granularity = writeContext.getGranularity();
            int ttl = getTtl(locator, rollup.getRollupType(), granularity);

            // lookup the right writer
            RollupType rollupType = writeContext.getRollup().getRollupType();
            DAbstractMetricIO io = getIO(rollupType.name().toLowerCase(), granularity);

            Statement statement = io.createStatement(locator, writeContext.getTimestamp(),
                    rollup, writeContext.getGranularity(), ttl);
            batch.add(statement);
        }

        Session session = DatastaxIO.getSession();
        session.execute(batch);

    } catch (Exception ex) {
        Instrumentation.markWriteError();
        LOG.error(String.format("error writing locator batch of size %s, granularity %s",
                writeContexts.size(), writeContexts.get(0).getGranularity()), ex);
    } finally {
        ctx.stop();
    }
}
Example 8
Source File: CassandraVideoV2Dao.java From arcusplatform with Apache License 2.0
private void addNonFavoriteTags(UUID placeId, UUID recordingId, Set<String> tags, long ttlInSeconds) {
    BatchStatement stmt = new BatchStatement(BatchStatement.Type.LOGGED);
    long expiration = VideoV2Util.createExpirationFromTTL(recordingId, ttlInSeconds);
    long actualTtlInSeconds = VideoV2Util.createActualTTL(recordingId, expiration);
    for (String tag : tags) {
        stmt.add(recordingMetadataTable.insertTag(recordingId, expiration, actualTtlInSeconds, tag));
        stmt.add(placeRecordingIndex.insertTag(placeId, recordingId, expiration, actualTtlInSeconds, tag));
    }
    executeAndUpdateTimer(session, stmt, AddTagsTimer);
}
Example 9
Source File: BaseCassandraCRUDDao.java From arcusplatform with Apache License 2.0
protected T doUpdate(T entity) {
    Date modified = new Date();
    List<Object> allValues = new LinkedList<Object>();
    allValues.add(modified);
    allValues.add(entity.getTags());
    allValues.add(entity.getImages());
    allValues.addAll(getValues(entity));
    allValues.add(entity.getId());

    Statement statement = new BoundStatement(update).bind(allValues.toArray());

    // TODO - implement smarter indexing
    List<Statement> indexUpdateStatements = prepareIndexUpdates(entity);
    if (!indexUpdateStatements.isEmpty()) {
        BatchStatement batch = new BatchStatement();
        batch.add(statement);
        addToBatch(batch, indexUpdateStatements);
        statement = batch;
    }

    try (Context ctxt = updateTimer.time()) {
        session.execute(statement);
    }

    T copy = entity.copy();
    copy.setModified(modified);
    return copy;
}
Example 10
Source File: CassandraPersonalization.java From yb-sample-apps with Apache License 2.0
@Override
public long doWrite(int threadIdx) {
    BatchStatement batch = new BatchStatement();
    PreparedStatement insert = getPreparedInsert();
    Key key = getSimpleLoadGenerator().getKeyToWrite();
    try {
        int totalCouponCount = 0;
        for (int i = 0; i < appConfig.numStores; i++) {
            String customerId = key.asString();
            String storeId = Integer.toString(i);
            int couponCount = appConfig.numNewCouponsPerCustomer / appConfig.numStores;
            for (int j = 0; j < couponCount; j++) {
                Coupon coupon = coupons.elementAt(j);
                batch.add(insert.bind(customerId, storeId, coupon.code, coupon.beginDate,
                        coupon.endDate, Double.valueOf(generateRandomRelevanceScore())));
            }
            totalCouponCount += couponCount;
        }
        ResultSet resultSet = getCassandraClient().execute(batch);
        LOG.debug("Wrote coupon count: " + totalCouponCount + ", return code: " + resultSet.toString());
        getSimpleLoadGenerator().recordWriteSuccess(key);
        return 1;
    } catch (Exception e) {
        getSimpleLoadGenerator().recordWriteFailure(key);
        throw e;
    }
}
Example 11
Source File: HttpTestUtil.java From simulacron with Apache License 2.0
public static BatchStatement makeNativeBatchStatement(List<String> queries, List<List> values) {
    BatchStatement statement = new BatchStatement();
    Iterator<List> valuesIterator = values.iterator();
    for (String query : queries) {
        List value = valuesIterator.next();
        statement.add(new SimpleStatement(query, value.toArray(new Object[value.size()])));
    }
    return statement;
}
Example 12
Source File: DBasicMetricsRW.java From blueflood with Apache License 2.0
/**
 * Inserts a collection of metrics in a batch using an unlogged
 * {@link BatchStatement}
 *
 * @param metrics
 * @return
 */
private void insertMetricsInBatch(Collection<IMetric> metrics) throws IOException {

    BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);

    for (IMetric metric : metrics) {
        BoundStatement bound = simpleNumberIO.getBoundStatementForMetric(metric);
        batch.add(bound);
        Instrumentation.markFullResMetricWritten();

        Locator locator = metric.getLocator();
        if (!LocatorCache.getInstance().isLocatorCurrentInBatchLayer(locator)) {
            LocatorCache.getInstance().setLocatorCurrentInBatchLayer(locator);
            batch.add(locatorIO.getBoundStatementForLocator(locator));
        }

        // if we are recording delayed metrics, we may need to do an
        // extra insert
        if (isRecordingDelayedMetrics) {
            BoundStatement bs = getBoundStatementForMetricIfDelayed(metric);
            if (bs != null) {
                batch.add(bs);
            }
        }
    }

    LOG.trace(String.format("insert batch statement size=%d", batch.size()));
    try {
        DatastaxIO.getSession().execute(batch);
    } catch (Exception ex) {
        Instrumentation.markWriteError();
        LOG.error(String.format("error writing batch of %d metrics", batch.size()), ex);
    }
}
Example 13
Source File: AuthorizationGrantDAOImpl.java From arcusplatform with Apache License 2.0
@Override
public void removeForPlace(UUID placeId) {
    try (Context ctxt = removeForPlaceTimer.time()) {
        List<AuthorizationGrant> grants = findForPlace(placeId);
        Statement statement = QueryBuilder.delete().from(AUTHORIZATION_GRANT_TABLE)
                .where(QueryBuilder.in(Cols.ENTITY_ID,
                        grants.stream().map(AuthorizationGrant::getEntityId).collect(Collectors.toList())))
                .and(QueryBuilder.eq(Cols.PLACE_ID, placeId));
        statement.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
        BatchStatement batch = new BatchStatement();
        batch.add(statement);
        batch.add(new BoundStatement(removeForPlace).bind(placeId));
        session.execute(batch);
    }
}
Example 14
Source File: CassandraOperationImpl.java From sunbird-lms-service with MIT License
/**
 * This method updates all the records in a batch
 *
 * @param keyspaceName
 * @param tableName
 * @param records
 * @return
 */
// @Override
public Response batchUpdateById(
        String keyspaceName, String tableName, List<Map<String, Object>> records) {

    long startTime = System.currentTimeMillis();
    ProjectLogger.log(
            "Cassandra Service batchUpdateById method started at ==" + startTime, LoggerEnum.INFO);
    Session session = connectionManager.getSession(keyspaceName);
    Response response = new Response();
    BatchStatement batchStatement = new BatchStatement();
    ResultSet resultSet = null;

    try {
        for (Map<String, Object> map : records) {
            Update update = createUpdateStatement(keyspaceName, tableName, map);
            batchStatement.add(update);
        }
        resultSet = session.execute(batchStatement);
        response.put(Constants.RESPONSE, Constants.SUCCESS);
    } catch (QueryExecutionException
            | QueryValidationException
            | NoHostAvailableException
            | IllegalStateException e) {
        ProjectLogger.log("Cassandra Batch Update Failed." + e.getMessage(), e);
        throw new ProjectCommonException(
                ResponseCode.SERVER_ERROR.getErrorCode(),
                ResponseCode.SERVER_ERROR.getErrorMessage(),
                ResponseCode.SERVER_ERROR.getResponseCode());
    }
    logQueryElapseTime("batchUpdateById", startTime);
    return response;
}
Example 15
Source File: MobileDeviceDAOImpl.java From arcusplatform with Apache License 2.0
private void addUpsertToBatch(BatchStatement batch, MobileDevice device) {
    batch.add(mobileDeviceUpsert(device));
}
Example 16
Source File: QueueMessageSerializationImpl.java From usergrid with Apache License 2.0
@Override
public void timeoutInflight(DatabaseQueueMessage message) {

    logger.trace("timeoutInflight {}", message.getQueueMessageId());

    // create statement to write queue message back to available table, with new UUID
    UUID newQueueMessageId = QakkaUtils.getTimeUuid();
    DatabaseQueueMessage newMessage = new DatabaseQueueMessage(
            message.getMessageId(),
            DatabaseQueueMessage.Type.DEFAULT,
            message.getQueueName(),
            message.getRegion(),
            null,
            System.currentTimeMillis(),
            -1L,
            newQueueMessageId);
    Statement write = createWriteMessageStatement(newMessage);

    // create statement to remove message from inflight table
    Statement delete = createDeleteMessageStatement(
            message.getQueueName(),
            message.getRegion(),
            message.getShardId(),
            message.getType(),
            message.getQueueMessageId());

    // execute statements as a batch
    BatchStatement batchStatement = new BatchStatement();
    batchStatement.add(write);
    batchStatement.add(delete);
    cassandraClient.getQueueMessageSession().execute(batchStatement);

    // bump counters
    shardCounterSerialization.incrementCounter(
            message.getQueueName(), Shard.Type.DEFAULT, message.getShardId(), 1);

    messageCounterSerialization.incrementCounter(
            message.getQueueName(), DatabaseQueueMessage.Type.DEFAULT, 1L);

    messageCounterSerialization.decrementCounter(
            message.getQueueName(), DatabaseQueueMessage.Type.INFLIGHT, 1L);
}
Example 17
Source File: BaseCassandraCRUDDao.java From arcusplatform with Apache License 2.0
private void addToBatch(BatchStatement batch, List<Statement> statements) {
    for (Statement statement : statements) {
        batch.add(statement);
    }
}
Example 18
Source File: CassandraOperationImpl.java From sunbird-lms-service with MIT License
@Override
public Response batchInsertWithTTL(
        String keyspaceName, String tableName, List<Map<String, Object>> records, List<Integer> ttls) {

    long startTime = System.currentTimeMillis();
    ProjectLogger.log(
            "CassandraOperationImpl:batchInsertWithTTL: call started at " + startTime, LoggerEnum.INFO);
    if (CollectionUtils.isEmpty(records) || CollectionUtils.isEmpty(ttls)) {
        ProjectLogger.log(
                "CassandraOperationImpl:batchInsertWithTTL: records or ttls is empty", LoggerEnum.ERROR);
        ProjectCommonException.throwServerErrorException(ResponseCode.SERVER_ERROR);
    }
    if (ttls.size() != records.size()) {
        ProjectLogger.log(
                "CassandraOperationImpl:batchInsertWithTTL: Mismatch of records and ttls list size",
                LoggerEnum.ERROR);
        ProjectCommonException.throwServerErrorException(ResponseCode.SERVER_ERROR);
    }

    Session session = connectionManager.getSession(keyspaceName);
    Response response = new Response();
    BatchStatement batchStatement = new BatchStatement();
    ResultSet resultSet = null;
    Iterator<Integer> ttlIterator = ttls.iterator();

    try {
        for (Map<String, Object> map : records) {
            Insert insert = QueryBuilder.insertInto(keyspaceName, tableName);
            map.entrySet()
                    .stream()
                    .forEach(
                            x -> {
                                insert.value(x.getKey(), x.getValue());
                            });
            if (ttlIterator.hasNext()) {
                Integer ttlVal = ttlIterator.next();
                // short-circuit && to avoid unboxing a null ttlVal
                if (ttlVal != null && ttlVal > 0) {
                    insert.using(QueryBuilder.ttl(ttlVal));
                }
            }
            batchStatement.add(insert);
        }
        resultSet = session.execute(batchStatement);
        response.put(Constants.RESPONSE, Constants.SUCCESS);
    } catch (QueryExecutionException
            | QueryValidationException
            | NoHostAvailableException
            | IllegalStateException e) {
        ProjectLogger.log(
                "CassandraOperationImpl:batchInsertWithTTL: Exception occurred with error message = "
                        + e.getMessage(),
                e);
        throw new ProjectCommonException(
                ResponseCode.SERVER_ERROR.getErrorCode(),
                ResponseCode.SERVER_ERROR.getErrorMessage(),
                ResponseCode.SERVER_ERROR.getResponseCode());
    }
    logQueryElapseTime("batchInsertWithTTL", startTime);
    return response;
}
Example 19
Source File: UniqueValueSerializationStrategyImplTest.java From usergrid with Apache License 2.0
@Test
public void twoFieldsPerVersion() throws ConnectionException, InterruptedException {

    ApplicationScope scope = new ApplicationScopeImpl(new SimpleId("organization"));

    Id entityId = new SimpleId(UUIDGenerator.newTimeUUID(), "entity");
    final UUID version1 = UUIDGenerator.newTimeUUID();

    //write V1 of everything
    IntegerField version1Field1 = new IntegerField("count", 1);
    StringField version1Field2 = new StringField("field", "v1value");

    UniqueValue version1Field1Value = new UniqueValueImpl(version1Field1, entityId, version1);
    UniqueValue version1Field2Value = new UniqueValueImpl(version1Field2, entityId, version1);

    //final MutationBatch batch = strategy.write( scope, version1Field1Value );
    //batch.mergeShallow( strategy.write( scope, version1Field2Value ) );

    final BatchStatement batch = new BatchStatement();
    batch.add(strategy.writeCQL(scope, version1Field1Value, -1));
    batch.add(strategy.writeCQL(scope, version1Field2Value, -1));

    //write V2 of everything
    final UUID version2 = UUIDGenerator.newTimeUUID();

    IntegerField version2Field1 = new IntegerField("count", 2);
    StringField version2Field2 = new StringField("field", "v2value");

    UniqueValue version2Field1Value = new UniqueValueImpl(version2Field1, entityId, version2);
    UniqueValue version2Field2Value = new UniqueValueImpl(version2Field2, entityId, version2);

    //batch.mergeShallow( strategy.write( scope, version2Field1Value ) );
    //batch.mergeShallow( strategy.write( scope, version2Field2Value ) );

    batch.add(strategy.writeCQL(scope, version2Field1Value, -1));
    batch.add(strategy.writeCQL(scope, version2Field2Value, -1));

    session.execute(batch);
    //batch.execute();

    UniqueValueSet fields = strategy.load(scope, entityId.getType(),
            Arrays.<Field>asList(version1Field1, version1Field2));

    UniqueValue retrieved = fields.getValue(version1Field1.getName());
    assertEquals(version1Field1Value, retrieved);

    retrieved = fields.getValue(version1Field2.getName());
    assertEquals(version1Field2Value, retrieved);

    Iterator<UniqueValue> allFieldsWritten = strategy.getAllUniqueFields(scope, entityId);
    assertTrue(allFieldsWritten.hasNext());

    //test this interface. In most cases, we won't know the field name, so we want them all
    UniqueValue allFieldsValue = allFieldsWritten.next();

    //version 2 fields should come first, ordered by field name
    assertEquals(version2Field1, allFieldsValue.getField());
    assertEquals(version2, allFieldsValue.getEntityVersion());

    allFieldsValue = allFieldsWritten.next();
    assertEquals(version2Field2, allFieldsValue.getField());
    assertEquals(version2, allFieldsValue.getEntityVersion());

    //version 1 should come next ordered by field name
    allFieldsValue = allFieldsWritten.next();
    assertEquals(version1Field1, allFieldsValue.getField());
    assertEquals(version1, allFieldsValue.getEntityVersion());

    allFieldsValue = allFieldsWritten.next();
    assertEquals(version1Field2, allFieldsValue.getField());
    assertEquals(version1, allFieldsValue.getEntityVersion());

    assertFalse(allFieldsWritten.hasNext());
}
Example 20
Source File: WriteUniqueVerify.java From usergrid with Apache License 2.0
private void verifyUniqueFields(CollectionIoEvent<MvccEntity> ioevent) {

    MvccValidationUtils.verifyMvccEntityWithEntity(ioevent.getEvent());

    final MvccEntity mvccEntity = ioevent.getEvent();
    final Entity entity = mvccEntity.getEntity().get();

    final ApplicationScope scope = ioevent.getEntityCollection();

    final BatchStatement batch = new BatchStatement();

    //allocate our max size, worst case
    final List<Field> uniqueFields = new ArrayList<>(entity.getFields().size());

    //
    // Construct all the functions for verifying we're unique
    //

    final Map<String, Field> preWriteUniquenessViolations = new HashMap<>(uniqueFields.size());

    for (final Field field : EntityUtils.getUniqueFields(entity)) {

        // if it's unique, create a function to validate it and add it to the list of
        // concurrent validations

        // use write-first then read strategy
        final UniqueValue written = new UniqueValueImpl(field, mvccEntity.getId(), mvccEntity.getVersion());

        // don't use read repair on this pre-write check
        UniqueValueSet set = uniqueValueStrat.load(scope, cassandraFig.getDataStaxReadCl(),
                written.getEntityId().getType(), Collections.singletonList(written.getField()), false);

        set.forEach(uniqueValue -> {

            if (!uniqueValue.getEntityId().getUuid().equals(written.getEntityId().getUuid())) {

                if (logger.isTraceEnabled()) {
                    logger.trace("Pre-write violation detected. Attempted write for unique value [{}={}] and " +
                                    "entity id [{}], entity version [{}] conflicts with already existing entity id [{}], " +
                                    "entity version [{}]",
                            written.getField().getName(),
                            written.getField().getValue().toString(),
                            written.getEntityId().getUuid(),
                            written.getEntityVersion(),
                            uniqueValue.getEntityId().getUuid(),
                            uniqueValue.getEntityVersion());
                }

                preWriteUniquenessViolations.put(field.getName(), field);
            }
        });

        // only build the batch statement if we don't have a violation for the field
        if (preWriteUniquenessViolations.get(field.getName()) == null) {

            // use TTL in case something goes wrong before entity is finally committed
            batch.add(uniqueValueStrat.writeCQL(scope, written, serializationFig.getTimeout()));

            uniqueFields.add(field);
        }
    }

    if (preWriteUniquenessViolations.size() > 0) {
        if (logger.isTraceEnabled()) {
            logger.trace("Pre-write unique violations found, raising exception before executing first write");
        }
        throw new WriteUniqueVerifyException(mvccEntity, scope, preWriteUniquenessViolations);
    }

    //short circuit nothing to do
    if (uniqueFields.size() == 0) {
        return;
    }

    //perform the write
    session.execute(batch);

    // use simple thread pool to verify fields in parallel
    ConsistentReplayCommand cmd = new ConsistentReplayCommand(uniqueValueStrat, cassandraFig, scope,
            entity.getId().getType(), uniqueFields, entity);

    Map<String, Field> uniquenessViolations = cmd.execute();

    //do we want to do this?
    //We have violations, throw an exception
    if (!uniquenessViolations.isEmpty()) {
        throw new WriteUniqueVerifyException(mvccEntity, ioevent.getEntityCollection(), uniquenessViolations);
    }
}