Java Code Examples for com.datastax.driver.core.Session#execute()

The following examples show how to use com.datastax.driver.core.Session#execute(). They are extracted from open-source projects; the link above each example names the original project and source file.
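Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. This is a sketch only: the contact point and query are placeholders, assuming a Cassandra node reachable at 127.0.0.1 and the driver 3.x API.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class SessionExecuteSketch {
  public static void main(String[] args) {
    // Cluster and Session are Closeable in driver 3.x, so try-with-resources cleans up both.
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); // placeholder contact point
         Session session = cluster.connect()) {
      // execute() accepts a raw CQL string, a Statement, or a bound PreparedStatement.
      ResultSet rs = session.execute("SELECT release_version FROM system.local");
      Row row = rs.one();
      System.out.println("Cassandra version: " + row.getString("release_version"));
    }
  }
}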
Example 1
Source File: CassandraQueryExecutor.java    From arcusplatform with Apache License 2.0
public static <T> PagedResults<T> page(Session session, BoundStatement statement, int limit, Function<Row, T> transformer, Function<Row, String> token) {
   List<T> results = new ArrayList<>(limit);
   statement.setFetchSize(limit + 1);
   ResultSet rs = session.execute( statement );
   Row row = rs.one();
   while(row != null && results.size() < limit) {
      try {
         T result = transformer.apply(row);
         results.add(result);
      }
      catch(Exception e) {
         log.warn("Unable to deserialize row {}", row, e);
      }
      row = rs.one();
   }
   if(row == null) {
      return PagedResults.newPage(results); 
   }
   else {
      return PagedResults.newPage(results, token.apply(row));
   }
}
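Note the fetch size of limit + 1: the extra row is never added to results. If a row is still left after the loop, its presence alone proves another page exists, and token.apply(row) derives the continuation token for the next request.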
 
Example 2
Source File: CassandraSinkIT.java    From ingestion with Apache License 2.0
@Test
public void initializeCqlTwice() throws TTransportException, IOException, InterruptedException {
  final InetSocketAddress contactPoint = CassandraTestHelper.getCassandraContactPoint();
  Cluster cluster = Cluster.builder()
      .addContactPointsWithPorts(Collections.singletonList(contactPoint))
      .build();
  Session session = cluster.connect();

  session.execute("DROP KEYSPACE IF EXISTS keyspaceTestCassandraSinkIT");
  Assert.assertNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT"));
  _do();
  Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT"));
  Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT")
      .getTable("tableTestCassandraSinkIT"));
  _do();
  Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT"));
  Assert.assertNotNull(session.getCluster().getMetadata().getKeyspace("keyspaceTestCassandraSinkIT")
      .getTable("tableTestCassandraSinkIT"));
  session.execute("DROP KEYSPACE IF EXISTS keyspaceTestCassandraSinkIT");

  session.close();
  cluster.close();
}
 
Example 3
Source File: SessionWithInitializedTablesFactoryTest.java    From james-project with Apache License 2.0
@Test
void createSessionShouldKeepTheSetSchemaVersionWhenTypesAndTablesHavePartiallyChanged() {
    Session session = testee.get();
    assertThat(versionManager(session).computeVersion().block())
            .isEqualTo(MAX_VERSION);

    new CassandraTableManager(MODULE, session).clearAllTables();
    versionManagerDAO(session).updateVersion(MIN_VERSION);
    assertThat(versionManager(session).computeVersion().block())
            .isEqualTo(MIN_VERSION);
    session.execute(SchemaBuilder.dropTable(TABLE_NAME));
    session.execute(SchemaBuilder.dropType(TYPE_NAME));

    assertThat(versionManager(testee.get()).computeVersion().block())
            .isEqualTo(MIN_VERSION);
}
 
Example 4
Source File: DeleteKeyspaceCommand.java    From titus-control-plane with Apache License 2.0
@Override
public void execute(CommandContext commandContext) {
    Session session = commandContext.getTargetSession();
    String keyspace = commandContext.getTargetKeySpace();

    if (keyspace.contains("main")) {
        throw new IllegalArgumentException("Cannot delete keyspaces that contain the word main");
    }

    boolean keyspaceExists = session.getCluster().getMetadata().getKeyspace(keyspace) != null;
    if (!keyspaceExists) {
        throw new IllegalStateException("Keyspace: " + keyspace + " does not exist.");
    }

    session.execute("DROP KEYSPACE " + keyspace);

    keyspaceExists = session.getCluster().getMetadata().getKeyspace(keyspace) != null;
    if (keyspaceExists) {
        throw new IllegalStateException("Keyspace: " + keyspace + " exists after deletion.");
    }
}
 
Example 5
Source File: CassandraOperatorTest.java    From attic-apex-malhar with Apache License 2.0
public void insertEventsInTable(int numEvents)
{
  try {
    Cluster cluster = Cluster.builder().addContactPoint(NODE).build();
    Session session = cluster.connect(KEYSPACE);

    String insert = "INSERT INTO " + TABLE_NAME_INPUT + " (ID,lastname,age)" + " VALUES (?,?,?);";
    PreparedStatement stmt = session.prepare(insert);
    BoundStatement boundStatement = new BoundStatement(stmt);
    for (int i = 0; i < numEvents; i++) {
      ids.add(i);
      mapNames.put(i, "test" + i);
      mapAge.put(i, i + 10);
      session.execute(boundStatement.bind(i, "test" + i, i + 10));
    }
  } catch (DriverException e) {
    throw new RuntimeException(e);
  }
}
 
Example 6
Source File: DAbstractMetricsRW.java    From blueflood with Apache License 2.0
/**
 * Inserts a collection of {@link com.rackspacecloud.blueflood.service.SingleRollupWriteContext} objects
 * into the appropriate Cassandra column family.
 *
 * The inserts are performed by executing a single UNLOGGED BATCH statement.
 *
 * @param writeContexts the rollup write contexts to persist
 */
@Override
public void insertRollups(List<SingleRollupWriteContext> writeContexts) {

    if (writeContexts.isEmpty()) {
        return;
    }

    Timer.Context ctx = Instrumentation.getWriteTimerContext( writeContexts.get( 0 ).getDestinationCF().getName() );
    try {

        BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);

        for (SingleRollupWriteContext writeContext : writeContexts) {
            Rollup rollup = writeContext.getRollup();
            Locator locator = writeContext.getLocator();
            Granularity granularity = writeContext.getGranularity();
            int ttl = getTtl(locator, rollup.getRollupType(), granularity);

            // lookup the right writer
            RollupType rollupType = writeContext.getRollup().getRollupType();
            DAbstractMetricIO io = getIO(rollupType.name().toLowerCase(), granularity);

            Statement statement = io.createStatement(locator, writeContext.getTimestamp(), rollup, writeContext.getGranularity(), ttl);
            batch.add(statement);
        }

        Session session = DatastaxIO.getSession();
        session.execute(batch);

    } catch (Exception ex) {
        Instrumentation.markWriteError();
        LOG.error(String.format("error writing locator batch of size %s, granularity %s", writeContexts.size(), writeContexts.get(0).getGranularity()), ex);
    } finally {
        ctx.stop();
    }
}
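A note on the batch type used above: an UNLOGGED batch skips Cassandra's batch log, so the grouped rollup inserts are not atomic as a unit. The batch serves to cut client-to-coordinator round trips, not to provide transactional guarantees.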
 
Example 7
Source File: CaseController.java    From skywalking with Apache License 2.0
private void execute(Session session) {
    logger.info("execute in sync");

    ResultSet createKeyspaceDataResultSet = session.execute(CREATE_KEYSPACE_SQL);
    logger.info("CREATE KEYSPACE result: " + createKeyspaceDataResultSet.toString());

    ResultSet createTableDataResultSet = session.execute(CREATE_TABLE_SQL);
    logger.info("CREATE TABLE result: " + createTableDataResultSet.toString());

    PreparedStatement insertDataPreparedStatement = session.prepare(INSERT_DATA_SQL);
    ResultSet insertDataResultSet = session.execute(insertDataPreparedStatement.bind("101", "foobar"));
    logger.info("INSERT result: " + insertDataResultSet.toString());

    PreparedStatement selectDataPreparedStatement = session.prepare(SELECT_DATA_SQL);
    ResultSet resultSet = session.execute(selectDataPreparedStatement.bind("101"));
    Row row = resultSet.one();
    logger.info("SELECT result: id: {}, value: {}", row.getString("id"), row.getString("value"));

    PreparedStatement deleteDataPreparedStatement = session.prepare(DELETE_DATA_SQL);
    ResultSet deleteDataResultSet = session.execute(deleteDataPreparedStatement.bind("101"));
    logger.info("DELETE result: " + deleteDataResultSet.toString());

    ResultSet dropTableDataResultSet = session.execute(DROP_TABLE_SQL);
    logger.info("DROP TABLE result: " + dropTableDataResultSet.toString());

    ResultSet dropKeyspaceDataResultSet = session.execute(DROP_KEYSPACE);
    logger.info("DROP KEYSPACE result: " + dropKeyspaceDataResultSet.toString());
}
 
Example 8
Source File: EnrichedCustomerServiceCassandraOutputOperator.java    From examples with Apache License 2.0
@Override
protected void createBusinessTables(Session session)
{

  String createTable = "CREATE TABLE IF NOT EXISTS " + cassandraConfig.getDatabase() + "."
      + cassandraConfig.getTableName()
      + " (id bigint PRIMARY KEY, imsi text, totalDuration int, wait int, zipCode text, issueType text, satisfied boolean, "
      + " operatorCode text, deviceBrand text, deviceModel text)";
  session.execute(createTable);
}
 
Example 9
Source File: AppBase.java    From yb-sample-apps with Apache License 2.0
/**
 * The apps extending this base should create all the necessary tables in this method.
 * @throws java.lang.Exception in case of CREATE statement errors.
 */
public void createTablesIfNeeded() throws Exception {
  for (String create_stmt : getCreateTableStatements()) {
    Session session = getCassandraClient();
    // consistency level of one to allow cross DC requests.
    session.execute(
      session.prepare(create_stmt)
        .setConsistencyLevel(ConsistencyLevel.ONE)
        .bind());
    LOG.info("Created a Cassandra table using query: [" + create_stmt + "]");
  }
}
 
Example 10
Source File: HttpPrimeQueryIntegrationTest.java    From simulacron with Apache License 2.0
@Test
public void testNoDelayOnPreparedStatementWhenIgnoreOnPrepareIsFalse() throws Exception {
  Prime prime =
      PrimeDsl.when("select * from table where c1=?")
          .then(noRows())
          .delay(2, TimeUnit.SECONDS)
          .ignoreOnPrepare()
          .build();
  HttpTestResponse response = server.prime(prime.getPrimedRequest());
  assertNotNull(response);
  RequestPrime responseQuery = om.readValue(response.body, RequestPrime.class);
  assertThat(responseQuery).isEqualTo(prime.getPrimedRequest());

  String contactPoint = HttpTestUtil.getContactPointString(server.getCluster(), 0);
  try (com.datastax.driver.core.Cluster cluster =
      defaultBuilder().addContactPoint(contactPoint).build()) {
    Session session = cluster.connect();
    long start = System.currentTimeMillis();
    PreparedStatement prepared = session.prepare("select * from table where c1=?");
    long duration = System.currentTimeMillis() - start;
    // should not have applied delay to prepare.
    assertThat(duration).isLessThan(2000);
    start = System.currentTimeMillis();
    session.execute(prepared.bind());
    duration = System.currentTimeMillis() - start;
    // should have taken longer than 2 seconds as delay is applied to execute.
    assertThat(duration).isGreaterThan(2000);
  }
}
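The prime's ignoreOnPrepare() setting is what keeps the primed delay off the PREPARE round trip; the delay is applied only when the bound statement is executed, which is exactly what the two timing assertions verify.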
 
Example 11
Source File: HttpTestUtil.java    From simulacron with Apache License 2.0
private static ResultSet executeQueryWithFreshSession(
    Statement statement,
    String contactPoint,
    Session session,
    com.datastax.driver.core.Cluster cluster) {
  if (session == null) {
    cluster = defaultBuilder().addContactPoint(contactPoint).build();
    session = cluster.connect();
  }
  ResultSet rs = session.execute(statement);
  cluster.close();
  return rs;
}
 
Example 12
Source File: CassandraOperationImpl.java    From sunbird-lms-service with MIT License
@Override
public Response batchUpdate(
    String keyspaceName, String tableName, List<Map<String, Map<String, Object>>> list) {

  Session session = connectionManager.getSession(keyspaceName);
  BatchStatement batchStatement = new BatchStatement();
  long startTime = System.currentTimeMillis();
  ProjectLogger.log(
      "Cassandra Service batchUpdate method started at ==" + startTime, LoggerEnum.INFO);
  Response response = new Response();
  ResultSet resultSet = null;
  try {
    for (Map<String, Map<String, Object>> record : list) {
      Map<String, Object> primaryKey = record.get(JsonKey.PRIMARY_KEY);
      Map<String, Object> nonPKRecord = record.get(JsonKey.NON_PRIMARY_KEY);
      batchStatement.add(
          CassandraUtil.createUpdateQuery(primaryKey, nonPKRecord, keyspaceName, tableName));
    }
    resultSet = session.execute(batchStatement);
    response.put(Constants.RESPONSE, Constants.SUCCESS);
  } catch (Exception ex) {
    ProjectLogger.log("Cassandra Batch Update failed " + ex.getMessage(), ex);
    throw new ProjectCommonException(
        ResponseCode.SERVER_ERROR.getErrorCode(),
        ResponseCode.SERVER_ERROR.getErrorMessage(),
        ResponseCode.SERVER_ERROR.getResponseCode());
  }
  logQueryElapseTime("batchUpdate", startTime);
  return response;
}
 
Example 13
Source File: CassandraOperationImpl.java    From sunbird-lms-service with MIT License
@Override
public Response batchInsert(
    String keyspaceName, String tableName, List<Map<String, Object>> records) {

  long startTime = System.currentTimeMillis();
  ProjectLogger.log(
      "Cassandra Service batchInsert method started at ==" + startTime, LoggerEnum.INFO);

  Session session = connectionManager.getSession(keyspaceName);
  Response response = new Response();
  BatchStatement batchStatement = new BatchStatement();
  ResultSet resultSet = null;

  try {
    for (Map<String, Object> map : records) {
      Insert insert = QueryBuilder.insertInto(keyspaceName, tableName);
      map.forEach(insert::value);
      batchStatement.add(insert);
    }
    resultSet = session.execute(batchStatement);
    response.put(Constants.RESPONSE, Constants.SUCCESS);
  } catch (QueryExecutionException
      | QueryValidationException
      | NoHostAvailableException
      | IllegalStateException e) {
    ProjectLogger.log("Cassandra Batch Insert Failed." + e.getMessage(), e);
    throw new ProjectCommonException(
        ResponseCode.SERVER_ERROR.getErrorCode(),
        ResponseCode.SERVER_ERROR.getErrorMessage(),
        ResponseCode.SERVER_ERROR.getResponseCode());
  }
  logQueryElapseTime("batchInsert", startTime);
  return response;
}
 
Example 14
Source File: ErrorResultIntegrationTest.java    From simulacron with Apache License 2.0
private ResultSet prepareAndQuery() throws Exception {
  try (com.datastax.driver.core.Cluster driverCluster =
      defaultBuilder(server.getCluster())
          .withRetryPolicy(FallthroughRetryPolicy.INSTANCE)
          .build()) {
    Session session = driverCluster.connect();
    PreparedStatement prepared = session.prepare(query);
    return session.execute(prepared.bind());
  }
}
 
Example 15
Source File: CassandraOperationImpl.java    From sunbird-lms-service with MIT License
@Override
public Response getRecordsByProperty(
    String keyspaceName,
    String tableName,
    String propertyName,
    Object propertyValue,
    List<String> fields) {
  Response response = new Response();
  Session session = connectionManager.getSession(keyspaceName);
  try {
    Builder selectBuilder;
    if (CollectionUtils.isNotEmpty(fields)) {
      // fields.toArray() returns Object[]; copy into a String[] to avoid a ClassCastException.
      selectBuilder = QueryBuilder.select(fields.toArray(new String[0]));
    } else {
      selectBuilder = QueryBuilder.select().all();
    }
    Statement selectStatement =
        selectBuilder.from(keyspaceName, tableName).where(eq(propertyName, propertyValue));
    ResultSet results = session.execute(selectStatement);
    response = CassandraUtil.createResponse(results);
  } catch (Exception e) {
    ProjectLogger.log(Constants.EXCEPTION_MSG_FETCH + tableName + " : " + e.getMessage(), e);
    throw new ProjectCommonException(
        ResponseCode.SERVER_ERROR.getErrorCode(),
        ResponseCode.SERVER_ERROR.getErrorMessage(),
        ResponseCode.SERVER_ERROR.getResponseCode());
  }
  return response;
}
 
Example 16
Source File: CassandraInjectedResource.java    From dropwizard-cassandra with Apache License 2.0
private List<String> query(Session session) {
	final ResultSet resultSet = session.execute("SELECT * FROM system_schema.columns");
	return resultSet.all().stream()
			.map(r -> r.getString(0))
			.collect(Collectors.toList());
}
 
Example 17
Source File: CqlExecuter.java    From Karaf-Cassandra with Apache License 2.0
@Override
public Object doExecute() throws Exception {

	Session session = (Session) this.session
			.get(SessionParameter.CASSANDRA_SESSION);

	if (session == null) {
		System.err
				.println("No active session found--run the connect command first");
		return null;
	}

	if (cql == null && fileLocation == null) {
		System.err
				.println("Either cql skript or a filename must be given.");
		return null;
	}

	if (cql == null && fileLocation != null) {
		byte[] encoded;
		// try-with-resources closes both streams, fixing the leak of the input stream
		try (InputStream is = fileLocation.toURL().openStream();
				ByteArrayOutputStream os = new ByteArrayOutputStream()) {
			byte[] buffer = new byte[0xFFFF];

			for (int len; (len = is.read(buffer)) != -1;)
				os.write(buffer, 0, len);

			os.flush();

			encoded = os.toByteArray();
		} catch (IOException e) {
			System.err.println("Can't read file input");
			return null;
		}

		cql = new String(encoded, Charset.defaultCharset());
	} else {
		if (cql.startsWith("\"")) {
			// need to remove surrounding quotes first; fall back to the full length
			// if there is no closing quote, instead of calling substring(1, 0)
			int start = 1;
			int end = cql.endsWith("\"") ? cql.lastIndexOf("\"") : cql.length();
			cql = cql.substring(start, end);
		}
	}

	ShellTable table = new ShellTable();

	ResultSet execute = session.execute(cql);

	cassandraRowFormater(table, execute);

	table.print(System.out);
	
	return null;
}
 
Example 18
Source File: HintsPollerTest.java    From emodb with Apache License 2.0
@Ignore
@Test
public void queryIsExecutedOnAllClusterNodesAndAlsoTheOldestHintIsPicked() {

    // ***** TODO: Get a cluster with 2 nodes. (RUN WITH PERFTEST LATER.....) ******
    // Ignoring this test for now

    // Insert hints on all the cluster nodes.
    Cluster cluster = Cluster.builder()
            .addContactPoint("127.0.0.1")
            .withPort(9164)
            .withLoadBalancingPolicy(new SelectedHostLoadBalancingPolicyForTest())
            .build();
    Metadata metadata = cluster.getMetadata();
    Session clusterSession = cluster.connect();

    long hintTimestamp = System.currentTimeMillis();
    for (Host host : metadata.getAllHosts()) {
        SelectedHostStatement selectedHostStatement = new SelectedHostStatement(new SimpleStatement(getInsertHintsQuery(hintTimestamp)), host);
        clusterSession.execute(selectedHostStatement);
        hintTimestamp = hintTimestamp + 10000;
    }

    // Now check if the query ran on EVERY node of the cluster.
    Assert.assertEquals(ALL_SELECTED_HOSTS.size(), 2);

    // Get the oldest hint Timestamp of the cluster
    ClusterHintsPoller clusterHintsPoller = new ClusterHintsPoller();
    HintsPollerResult oldestHintsInfo = clusterHintsPoller.getOldestHintsInfo(clusterSession);

    // Note: ?? - This will make the test fail even if one node is down or a connection problem with just one node.
    Assert.assertNotNull(oldestHintsInfo);
    Assert.assertEquals(oldestHintsInfo.getAllPolledHosts().size(), 2);

    long retrievedHintTimeStamp = oldestHintsInfo.getOldestHintTimestamp().or(Long.MAX_VALUE);

    Assert.assertEquals(retrievedHintTimeStamp, hintTimestamp);

    cluster.close();
    clusterSession.close();
}
 
Example 19
Source File: GenerateHubPartitionId.java    From arcusplatform with Apache License 2.0
public void execute(ExecutionContext context, boolean autoRollback) throws CommandExecutionException {
   Session session = context.getSession();
   PreparedStatement update = session.prepare(UPSERT_PARTITIONID);
   
   BoundStatement select = session.prepare(SELECT).bind();
   select.setConsistencyLevel(ConsistencyLevel.ALL);
   ResultSet rs = context.getSession().execute(select);
   int count = 0;
   int [] hubsPerPartition = new int[partitionCount];
   logger.info("Preparing to partition hub ids");
   long startTimeNs = System.nanoTime();
   for(Row row: rs) {
      String hubId = row.getString("id");
      UUID placeId = row.getUUID("placeid");
      int partitionId;
      if(placeId == null) {
         Matcher m = PATTERN_HUBID.matcher(hubId);
         if(!m.matches()) {
            logger.warn("Invalid hub id: [{}]", hubId);
            return;
         }
         String hubNum = m.group(1);
         partitionId = Integer.parseInt(hubNum) % partitionCount;  
      }
      else {
         partitionId = (int) (Math.floorMod(placeId.getLeastSignificantBits(), partitionCount));
      }

      logger.debug("Adding [{}] to partition [{}]", hubId, partitionId);
      BoundStatement bs = update.bind(partitionId, hubId);
      session.execute(bs);
      
      count++;
      hubsPerPartition[partitionId]++;
   }
   long duration = System.nanoTime() - startTimeNs;
   logger.info("Partitioned {} hubs in {} secs", count, duration / (float) TimeUnit.NANOSECONDS.toSeconds(1));
   for(int i=0; i<partitionCount; i++) {
      logger.info(String.format("%03d: %3d hubs", i, hubsPerPartition[i]));
   }
}
 
Example 20
Source File: Migration021.java    From cassandra-reaper with Apache License 2.0
/**
 * Apply TWCS for metrics tables if the Cassandra version allows it.
 */
public static void migrate(Session session, String keyspace) {

  VersionNumber lowestNodeVersion = session.getCluster().getMetadata().getAllHosts()
      .stream()
      .map(host -> host.getCassandraVersion())
      .min(VersionNumber::compareTo)
      .get();

  if ((VersionNumber.parse("3.0.8").compareTo(lowestNodeVersion) <= 0
      && VersionNumber.parse("3.0.99").compareTo(lowestNodeVersion) >= 0)
      || VersionNumber.parse("3.8").compareTo(lowestNodeVersion) <= 0) {
    try {
      if (!isUsingTwcs(session, keyspace)) {
        LOG.info("Altering {} to use TWCS...", METRICS_V1_TABLE);
        session.execute(
                "ALTER TABLE " + METRICS_V1_TABLE + " WITH compaction = {'class': 'TimeWindowCompactionStrategy', "
                    + "'unchecked_tombstone_compaction': 'true', "
                    + "'compaction_window_size': '2', "
                    + "'compaction_window_unit': 'MINUTES'}");

        LOG.info("Altering {} to use TWCS...", METRICS_V2_TABLE);
        session.execute(
                "ALTER TABLE " + METRICS_V2_TABLE + " WITH compaction = {'class': 'TimeWindowCompactionStrategy', "
                    + "'unchecked_tombstone_compaction': 'true', "
                    + "'compaction_window_size': '1', "
                    + "'compaction_window_unit': 'HOURS'}");

        LOG.info("{} was successfully altered to use TWCS.", METRICS_V2_TABLE);

        LOG.info("Altering {} to use TWCS...", OPERATIONS_TABLE);
        session.execute(
                "ALTER TABLE " + OPERATIONS_TABLE + " WITH compaction = {'class': 'TimeWindowCompactionStrategy', "
                    + "'unchecked_tombstone_compaction': 'true', "
                    + "'compaction_window_size': '1', "
                    + "'compaction_window_unit': 'HOURS'}");

        LOG.info("{} was successfully altered to use TWCS.", OPERATIONS_TABLE);
      }
    } catch (RuntimeException e) {
      LOG.error("Failed altering metrics tables to TWCS", e);
    }
  }

}