Java Code Examples for com.datastax.driver.core.PreparedStatement#bind()

The following examples show how to use com.datastax.driver.core.PreparedStatement#bind(). Each example is drawn from an open source project; the source file, project, and license are noted above the code.
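
Before the project examples, here is a minimal sketch of the two common ways to bind a prepared statement with the 3.x driver: passing all values to bind() as varargs in placeholder order, or calling bind() with no arguments and setting each value by index afterwards. The contact point, "demo" keyspace, and users table below are hypothetical, for illustration only.

import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;

import java.util.UUID;

public class BindStylesSketch {

    public static void main(String[] args) {
        // Hypothetical setup: a local node and a keyspace "demo" containing
        // a table users (id uuid primary key, name text).
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
                Session session = cluster.connect("demo")) {

            PreparedStatement insert =
                    session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");

            // Style 1: pass all values to bind() in placeholder order (varargs).
            BoundStatement byVarargs = insert.bind(UUID.randomUUID(), "alice");
            session.execute(byVarargs);

            // Style 2: bind() with no arguments, then set each value by index.
            BoundStatement byIndex = insert.bind();
            byIndex.setUUID(0, UUID.randomUUID());
            byIndex.setString(1, "bob");
            session.execute(byIndex);
        }
    }
}
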
Example 1
Source File: Common.java    From glowroot with Apache License 2.0
static List<NeedsRollupFromChildren> getNeedsRollupFromChildrenList(String agentRollupId,
        PreparedStatement readNeedsRollupFromChild, Session session) throws Exception {
    BoundStatement boundStatement = readNeedsRollupFromChild.bind();
    boundStatement.setString(0, agentRollupId);
    ResultSet results = session.read(boundStatement);
    Map<Long, NeedsRollupFromChildren> needsRollupFromChildrenMap = new LinkedHashMap<>();
    for (Row row : results) {
        int i = 0;
        long captureTime = checkNotNull(row.getTimestamp(i++)).getTime();
        UUID uniqueness = row.getUUID(i++);
        String childAgentRollupId = checkNotNull(row.getString(i++));
        Set<String> keys = checkNotNull(row.getSet(i++, String.class));
        NeedsRollupFromChildren needsRollup = needsRollupFromChildrenMap.get(captureTime);
        if (needsRollup == null) {
            needsRollup = new NeedsRollupFromChildren(captureTime);
            needsRollupFromChildrenMap.put(captureTime, needsRollup);
        }
        for (String key : keys) {
            needsRollup.keys.put(key, childAgentRollupId);
        }
        needsRollup.uniquenessKeysForDeletion.add(uniqueness);
    }
    return ImmutableList.copyOf(needsRollupFromChildrenMap.values());
}
 
Example 2
Source File: EventRepositoryCassandraEtlImpl.java    From konker-platform with Apache License 2.0
public void saveEvent(Tenant tenant, Application application, Event event, String table) {
    PreparedStatement ps = getInsertIncomingPreparedStatement(table);

    BoundStatement statement = ps.bind(tenant.getDomainName(),
            application.getName(),
            event.getEpochTime(),
            event.getIncoming().getChannel(),
            event.getIncoming().getDeviceGuid(),
            event.getIncoming().getDeviceId(),
            event.getIncoming().getLocationGuid(),
            Optional.ofNullable(event.getGeolocation()).isPresent() ? event.getGeolocation().getElev() : null,
            Optional.ofNullable(event.getGeolocation()).isPresent() ? event.getGeolocation().getHdop() : null,
            Optional.ofNullable(event.getGeolocation()).isPresent() ? event.getGeolocation().getLat() : null,
            Optional.ofNullable(event.getGeolocation()).isPresent() ? event.getGeolocation().getLon() : null,
            event.getIngestedTimestamp().toEpochMilli() * 1000000, // nanoseconds
            event.getPayload());

    session.execute(statement);
}
 
Example 3
Source File: CassandraBaseTimeseriesDao.java    From iotplatform with Apache License 2.0
private AsyncFunction<List<Long>, List<ResultSet>> getFetchChunksAsyncFunction(EntityId entityId, String key, Aggregation aggregation, long startTs, long endTs) {
    return partitions -> {
        try {
            PreparedStatement proto = getFetchStmt(aggregation);
            List<ResultSetFuture> futures = new ArrayList<>(partitions.size());
            for (Long partition : partitions) {
                log.trace("Fetching data for partition [{}] for entityType {} and entityId {}", partition, entityId.getEntityType(), entityId.getId());
                BoundStatement stmt = proto.bind();
                stmt.setString(0, entityId.getEntityType().name());
                stmt.setUUID(1, entityId.getId());
                stmt.setString(2, key);
                stmt.setLong(3, partition);
                stmt.setLong(4, startTs);
                stmt.setLong(5, endTs);
                log.debug("Generated query [{}] for entityType {} and entityId {}", stmt, entityId.getEntityType(), entityId.getId());
                futures.add(executeAsyncRead(stmt));
            }
            return Futures.allAsList(futures);
        } catch (Throwable e) {
            log.error("Failed to fetch data", e);
            throw e;
        }
    };
}
 
Example 4
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void rewriteOpenIncidentTablePart1() throws Exception {
    if (!tableExists("open_incident")) {
        // must be upgrading all the way from a glowroot version prior to open_incident
        return;
    }
    dropTableIfExists("open_incident_temp");
    session.updateSchemaWithRetry("create table if not exists open_incident_temp (one int,"
            + " agent_rollup_id varchar, condition blob, severity varchar, notification blob,"
            + " open_time timestamp, primary key (one, agent_rollup_id, condition, severity))");
    PreparedStatement insertTempPS = session.prepare("insert into open_incident_temp (one,"
            + " agent_rollup_id, condition, severity, notification, open_time) values"
            + " (1, ?, ?, ?, ?, ?)");
    ResultSet results = session.read("select agent_rollup_id, condition, severity,"
            + " notification, open_time from open_incident where one = 1");
    for (Row row : results) {
        BoundStatement boundStatement = insertTempPS.bind();
        boundStatement.setString(0, row.getString(0));
        boundStatement.setBytes(1, row.getBytes(1));
        boundStatement.setString(2, row.getString(2));
        boundStatement.setBytes(3, row.getBytes(3));
        boundStatement.setTimestamp(4, row.getTimestamp(4));
        session.write(boundStatement);
    }
}
 
Example 5
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void rewriteAgentConfigTablePart2() throws Exception {
    if (!tableExists("agent_config_temp")) {
        // previously failed mid-upgrade prior to updating schema version
        return;
    }
    dropTableIfExists("agent_config");
    session.createTableWithLCS("create table if not exists agent_config (agent_rollup_id"
            + " varchar, config blob, config_update boolean, config_update_token uuid, primary"
            + " key (agent_rollup_id))");
    PreparedStatement insertPS = session.prepare("insert into agent_config"
            + " (agent_rollup_id, config, config_update, config_update_token) values"
            + " (?, ?, ?, ?)");
    Map<String, V09AgentRollup> v09AgentRollups = getV09AgentRollupsFromAgentRollupTable();
    ResultSet results = session.read("select agent_rollup_id, config, config_update,"
            + " config_update_token from agent_config_temp");
    for (Row row : results) {
        String v09AgentRollupId = row.getString(0);
        V09AgentRollup v09AgentRollup = v09AgentRollups.get(v09AgentRollupId);
        if (v09AgentRollup == null) {
            // v09AgentRollupId was manually deleted (via the UI) from the agent_rollup
            // table in which case its parent is no longer known and best to ignore
            continue;
        }
        BoundStatement boundStatement = insertPS.bind();
        boundStatement.setString(0, v09AgentRollup.agentRollupId());
        boundStatement.setBytes(1, row.getBytes(1));
        boundStatement.setBool(2, row.getBool(2));
        boundStatement.setUUID(3, row.getUUID(3));
        session.write(boundStatement);
    }
    dropTableIfExists("agent_config_temp");
}
 
Example 6
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void rewriteEnvironmentTablePart2() throws Exception {
    if (!tableExists("environment_temp")) {
        // previously failed mid-upgrade prior to updating schema version
        return;
    }
    dropTableIfExists("environment");
    session.createTableWithLCS("create table if not exists environment (agent_id varchar,"
            + " environment blob, primary key (agent_id))");
    PreparedStatement insertPS = session
            .prepare("insert into environment (agent_id, environment) values (?, ?)");
    Map<String, V09AgentRollup> v09AgentRollups = getV09AgentRollupsFromAgentRollupTable();
    ResultSet results = session.read("select agent_id, environment from environment_temp");
    for (Row row : results) {
        String v09AgentRollupId = row.getString(0);
        V09AgentRollup v09AgentRollup = v09AgentRollups.get(v09AgentRollupId);
        if (v09AgentRollup == null) {
            // v09AgentRollupId was manually deleted (via the UI) from the agent_rollup
            // table in which case its parent is no longer known and best to ignore
            continue;
        }
        BoundStatement boundStatement = insertPS.bind();
        boundStatement.setString(0, v09AgentRollup.agentRollupId());
        boundStatement.setBytes(1, row.getBytes(1));
        session.write(boundStatement);
    }
    dropTableIfExists("environment_temp");
}
 
Example 7
Source File: CQLTransaction.java    From Doradus with Apache License 2.0
private BoundStatement addColumnUpdate(String tableName, String key, DColumn column, boolean isBinaryValue) {
    PreparedStatement prepState = m_dbservice.getPreparedUpdate(Update.INSERT_ROW, tableName);
    BoundStatement boundState = prepState.bind();
    boundState.setString(0, key);
    boundState.setString(1, column.getName());
    if (isBinaryValue) {
        boundState.setBytes(2, ByteBuffer.wrap(column.getRawValue()));
    } else {
        boundState.setString(2, column.getValue());
    }
    return boundState;
}
 
Example 8
Source File: CassAppScalePolicyStoreTest.java    From titus-control-plane with Apache License 2.0
private void loadTestData() throws Exception {
    Session session = cassandraCQLUnit.getSession();
    String insertStmt = "INSERT INTO app_scale_policy(ref_id, job_id, status, value) VALUES(?, ?, ?, ?);";
    PreparedStatement stmt = session.prepare(insertStmt);

    // record 1
    String jobId = "job-1";
    String serializedValue = ObjectMappers.appScalePolicyMapper().writeValueAsString(buildAutoScalingPolicy(jobId).getPolicyConfiguration());
    BoundStatement boundStatement = stmt.bind(UUID.fromString(POLICY_1_ID), jobId, PolicyStatus.Pending.name(), serializedValue);
    session.execute(boundStatement);

    // record 2
    String jobIdTwo = "job-2";
    String serializedValueTwo = ObjectMappers.appScalePolicyMapper().writeValueAsString(buildAutoScalingPolicy(jobIdTwo).getPolicyConfiguration());
    boundStatement = stmt.bind(UUID.fromString(POLICY_2_ID), jobIdTwo, PolicyStatus.Pending.name(), serializedValueTwo);
    session.execute(boundStatement);

    // record 3
    boundStatement = stmt.bind(UUID.fromString(POLICY_3_ID), jobId, PolicyStatus.Pending.name(), serializedValue);
    session.execute(boundStatement);

    // insert job-policy relationship
    insertStmt = "INSERT INTO app_scale_jobs(job_id, ref_id) VALUES(?, ?);";
    stmt = session.prepare(insertStmt);

    boundStatement = stmt.bind("job-1", UUID.fromString(POLICY_1_ID));
    session.execute(boundStatement);
    boundStatement = stmt.bind("job-1", UUID.fromString(POLICY_3_ID));
    session.execute(boundStatement);
    boundStatement = stmt.bind("job-2", UUID.fromString(POLICY_2_ID));
    session.execute(boundStatement);
}
 
Example 9
Source File: CassandraRangeKeyValue.java    From yb-sample-apps with Apache License 2.0
@Override
protected BoundStatement bindInsert(String key, ByteBuffer value)  {
  PreparedStatement prepared_stmt = getPreparedInsert(String.format(
      "INSERT INTO %s (k, r1, r2, r3, v) VALUES (?, ?, ?, ?, ?);",
      getTableName()));
  return prepared_stmt.bind(key, key, key, key, value);
}
 
Example 10
Source File: SelectStatementHandler.java    From scalardb with Apache License 2.0
@Override
@Nonnull
protected BoundStatement bind(PreparedStatement prepared, Operation operation) {
  checkArgument(operation, Get.class, Scan.class);
  BoundStatement bound = prepared.bind();

  if (operation instanceof Get) {
    bound = bind(bound, (Get) operation);
  } else {
    bound = bind(bound, (Scan) operation);
  }

  return bound;
}
 
Example 11
Source File: InsertStatementHandler.java    From scalardb with Apache License 2.0
@Override
@Nonnull
protected BoundStatement bind(PreparedStatement prepared, Operation operation) {
  checkArgument(operation, Put.class);

  BoundStatement bound = prepared.bind();
  bound = bind(bound, (Put) operation);

  return bound;
}
 
Example 12
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void rewriteTransactionTypeTablePart2() throws Exception {
    if (!tableExists("transaction_type_temp")) {
        // previously failed mid-upgrade prior to updating schema version
        return;
    }
    dropTableIfExists("transaction_type");
    Map<String, V09AgentRollup> v09AgentRollups = getV09AgentRollupsFromAgentRollupTable();
    session.createTableWithLCS("create table if not exists transaction_type (one int,"
            + " agent_rollup varchar, transaction_type varchar, primary key (one, agent_rollup,"
            + " transaction_type))");
    PreparedStatement insertPS = session.prepare("insert into transaction_type (one,"
            + " agent_rollup, transaction_type) values (1, ?, ?) using ttl ?");
    int ttl = getCentralStorageConfig(session).getMaxRollupTTL();
    ResultSet results = session.read(
            "select agent_rollup, transaction_type from transaction_type_temp where one = 1");
    for (Row row : results) {
        String v09AgentRollupId = row.getString(0);
        V09AgentRollup v09AgentRollup = v09AgentRollups.get(v09AgentRollupId);
        if (v09AgentRollup == null) {
            // v09AgentRollupId was manually deleted (via the UI) from the agent_rollup
            // table in which case its parent is no longer known and best to ignore
            continue;
        }
        BoundStatement boundStatement = insertPS.bind();
        boundStatement.setString(0, v09AgentRollup.agentRollupId());
        boundStatement.setString(1, row.getString(1));
        boundStatement.setInt(2, ttl);
        session.write(boundStatement);
    }
    dropTableIfExists("transaction_type_temp");
}
 
Example 13
Source File: GenerateIpcdPartitionId.java    From arcusplatform with Apache License 2.0
public void execute(ExecutionContext context, boolean autoRollback) throws CommandExecutionException {
   Session session = context.getSession();
   PreparedStatement update = session.prepare(UPSERT_PARTITIONID);

   BoundStatement select = session.prepare(SELECT).bind();
   select.setConsistencyLevel(ConsistencyLevel.ALL);
   ResultSet rs = context.getSession().execute(select);
   int count = 0;
   int [] devsPerPartition = new int[partitionCount];
   logger.info("Preparing to partition ipcd devices...");
   long startTimeNs = System.nanoTime();
   for(Row row: rs) {
      String protocolAddress = row.getString("protocoladdress");
      UUID placeId = row.getUUID("placeid");
      int partitionId;
      if(placeId == null) {
         partitionId = 0;
      } else {
         partitionId = (int) (Math.floorMod(placeId.getLeastSignificantBits(), partitionCount));
      }

      logger.debug("Adding [{}] to partition [{}]", protocolAddress, partitionId);
      BoundStatement bs = update.bind(partitionId, protocolAddress);
      session.execute(bs);

      count++;
      devsPerPartition[partitionId]++;
   }
   long duration = System.nanoTime() - startTimeNs;
   logger.info("Partitioned {} ipcd devices in {} secs", count, duration / (float) TimeUnit.NANOSECONDS.toSeconds(1));
   for(int i=0; i<partitionCount; i++) {
      logger.info(String.format("%03d: %3d devs", i, devsPerPartition[i]));
   }
}
 
Example 14
Source File: GeneratePlacePartitionId.java    From arcusplatform with Apache License 2.0
public void execute(ExecutionContext context, boolean autoRollback) throws CommandExecutionException {
   Session session = context.getSession();
   PreparedStatement update = session.prepare(UPSERT_PARTITIONID);
   update.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
   
   BoundStatement select = session.prepare(SELECT).bind();
   select.setConsistencyLevel(ConsistencyLevel.ALL);
   ResultSet rs = context.getSession().execute(select);
   int count = 0;
   int [] hubsPerPartition = new int[partitionCount];
   logger.info("Preparing to partition place ids");
   long startTimeNs = System.nanoTime();
   for(Row row: rs) {
      UUID placeId = row.getUUID("id");
      int partitionId = (int) (Math.floorMod(placeId.getLeastSignificantBits(), partitionCount));

      logger.debug("Adding [{}] to partition [{}]", placeId, partitionId);
      BoundStatement bs = update.bind(partitionId, placeId);
      session.execute(bs);
      
      count++;
      hubsPerPartition[partitionId]++;
   }
   long duration = System.nanoTime() - startTimeNs;
   logger.info("Partitioned {} place in {} secs", count, duration / (float) TimeUnit.NANOSECONDS.toSeconds(1));
   for(int i=0; i<partitionCount; i++) {
      logger.info(String.format("%03d: %3d places", i, hubsPerPartition[i]));
   }
}
 
Example 15
Source File: CassandraTarget.java    From datacollector with Apache License 2.0
/**
 * Convert a Record into a fully-bound statement.
 */
@SuppressWarnings("unchecked")
private BoundStatement recordToBoundStatement(Record record) throws StageException {
  ImmutableList.Builder<Object> values = new ImmutableList.Builder<>();
  SortedSet<String> columnsPresent = Sets.newTreeSet(columnMappings.keySet());
  for (Map.Entry<String, String> mapping : columnMappings.entrySet()) {
    String columnName = mapping.getKey();
    String fieldPath = mapping.getValue();

    // If we're missing fields, skip them.
    // If a field is present, but null, also remove it from columnsPresent since we can't write nulls.
    if (!record.has(fieldPath) || record.get(fieldPath).getValue() == null) {
      columnsPresent.remove(columnName);
      continue;
    }

    final Object value = record.get(fieldPath).getValue();
    // Special cases for handling SDC Lists and Maps,
    // basically unpacking them into raw types.
    if (value instanceof List) {
      List<Object> unpackedList = new ArrayList<>();
      for (Field item : (List<Field>) value) {
        unpackedList.add(item.getValue());
      }
      values.add(unpackedList);
    } else if (value instanceof Map) {
      Map<Object, Object> unpackedMap = new HashMap<>();
      for (Map.Entry<String, Field> entry : ((Map<String, Field>) value).entrySet()) {
        unpackedMap.put(entry.getKey(), entry.getValue().getValue());
      }
      values.add(unpackedMap);
    } else {
      values.add(value);
    }
  }
  PreparedStatement stmt = statementCache.getUnchecked(columnsPresent);
  // .toArray required to pass in a list to a varargs method.
  Object[] valuesArray = values.build().toArray();
  BoundStatement boundStmt = null;
  try {
    boundStmt = stmt.bind(valuesArray);
  } catch (CodecNotFoundException | InvalidTypeException | NullPointerException e) {
    // NPE can occur if one of the values is a collection type with a null value inside it. Thus, it's a record
    // error. Note that this runs the risk of mistakenly treating a bug as a record error.
    // CodecNotFound is caused when there is no type conversion definition available from the provided type
    // to the target type.
    errorRecordHandler.onError(
        new OnRecordErrorException(
            record,
            Errors.CASSANDRA_06,
            record.getHeader().getSourceId(),
            e.toString(),
            e
        )
    );
  }
  return boundStmt;
}
 
Example 16
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void populateTraceTnSlowCountAndPointPartialPart1() throws Exception {
    logger.info("populating trace_tn_slow_count_partial and trace_tn_slow_point_partial tables"
            + " - this could take several minutes on large data sets...");
    CentralStorageConfig storageConfig = getCentralStorageConfig(session);
    dropTableIfExists("trace_tn_slow_count_partial");
    dropTableIfExists("trace_tn_slow_point_partial");
    session.createTableWithTWCS("create table if not exists trace_tn_slow_count_partial"
            + " (agent_rollup varchar, transaction_type varchar, transaction_name varchar,"
            + " capture_time timestamp, agent_id varchar, trace_id varchar, primary key"
            + " ((agent_rollup, transaction_type, transaction_name), capture_time, agent_id,"
            + " trace_id))", storageConfig.traceExpirationHours(), false, true);
    session.createTableWithTWCS("create table if not exists trace_tn_slow_point_partial"
            + " (agent_rollup varchar, transaction_type varchar, transaction_name varchar,"
            + " capture_time timestamp, agent_id varchar, trace_id varchar, duration_nanos"
            + " bigint, error boolean, headline varchar, user varchar, attributes blob, primary"
            + " key ((agent_rollup, transaction_type, transaction_name), capture_time,"
            + " agent_id, trace_id))", storageConfig.traceExpirationHours(), false, true);
    PreparedStatement insertCountPartialPS = session.prepare("insert into"
            + " trace_tn_slow_count_partial (agent_rollup, transaction_type, transaction_name,"
            + " capture_time, agent_id, trace_id) values (?, ?, ?, ?, ?, ?) using ttl ?");
    PreparedStatement insertPointPartialPS = session.prepare("insert into"
            + " trace_tn_slow_point_partial (agent_rollup, transaction_type, transaction_name,"
            + " capture_time, agent_id, trace_id, duration_nanos, error, headline, user,"
            + " attributes) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) using ttl ?");
    int ttl = getCentralStorageConfig(session).getTraceTTL();
    ResultSet results = session.read("select agent_rollup, transaction_type,"
            + " transaction_name, capture_time, agent_id, trace_id, duration_nanos, error,"
            + " headline, user, attributes, partial from trace_tn_slow_point");
    Queue<ListenableFuture<?>> futures = new ArrayDeque<>();
    Stopwatch stopwatch = Stopwatch.createStarted();
    int rowCount = 0;
    for (Row row : results) {
        if (!row.getBool(11)) { // partial
            // unfortunately cannot use "where partial = true allow filtering" in the query
            // above as that leads to ReadTimeoutException
            continue;
        }
        BoundStatement boundStatement = insertCountPartialPS.bind();
        int i = 0;
        copyString(row, boundStatement, i++); // agent_rollup
        copyString(row, boundStatement, i++); // transaction_type
        copyString(row, boundStatement, i++); // transaction_name
        Date captureDate = checkNotNull(row.getTimestamp(i));
        int adjustedTTL = Common.getAdjustedTTL(ttl, captureDate.getTime(), clock);
        copyTimestamp(row, boundStatement, i++); // capture_time
        copyString(row, boundStatement, i++); // agent_id
        copyString(row, boundStatement, i++); // trace_id
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.writeAsync(boundStatement));

        boundStatement = insertPointPartialPS.bind();
        i = 0;
        copyString(row, boundStatement, i++); // agent_rollup
        copyString(row, boundStatement, i++); // transaction_type
        copyString(row, boundStatement, i++); // transaction_name
        copyTimestamp(row, boundStatement, i++); // capture_time
        copyString(row, boundStatement, i++); // agent_id
        copyString(row, boundStatement, i++); // trace_id
        copyLong(row, boundStatement, i++); // duration_nanos
        copyBool(row, boundStatement, i++); // error
        copyString(row, boundStatement, i++); // headline
        copyString(row, boundStatement, i++); // user
        copyBytes(row, boundStatement, i++); // attributes
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.writeAsync(boundStatement));

        rowCount++;
        if (stopwatch.elapsed(SECONDS) > 60) {
            logger.info("processed {} records", rowCount);
            stopwatch.reset().start();
        }
        waitForSome(futures);
    }
    MoreFutures.waitForAll(futures);
    logger.info("populating trace_tn_slow_count_partial and trace_tn_slow_point_partial tables"
            + " - complete");
}
 
Example 17
Source File: DataAccessImpl.java    From hawkular-metrics with Apache License 2.0
private BoundStatement bindDataPoint(PreparedStatement statement, Metric<?> metric, Object value, long timestamp) {
    MetricId<?> metricId = metric.getMetricId();
    return statement.bind(value, metricId.getTenantId(), metricId.getType().getCode(), metricId.getName(),
            DPART, getTimeUUID(timestamp));
}
 
Example 18
Source File: CassandraOperationImpl.java    From sunbird-lms-service with MIT License
@Override
public Response updateRecord(String keyspaceName, String tableName, Map<String, Object> request) {
  long startTime = System.currentTimeMillis();
  ProjectLogger.log(
      "Cassandra Service updateRecord method started at ==" + startTime, LoggerEnum.INFO);
  Response response = new Response();
  try {
    String query = CassandraUtil.getUpdateQueryStatement(keyspaceName, tableName, request);
    PreparedStatement statement = connectionManager.getSession(keyspaceName).prepare(query);
    Object[] array = new Object[request.size()];
    int i = 0;
    String str = "";
    int index = query.lastIndexOf(Constants.SET.trim());
    str = query.substring(index + 4);
    str = str.replace(Constants.EQUAL_WITH_QUE_MARK, "");
    str = str.replace(Constants.WHERE_ID, "");
    str = str.replace(Constants.SEMICOLON, "");
    String[] arr = str.split(",");
    for (String key : arr) {
      array[i++] = request.get(key.trim());
    }
    array[i] = request.get(Constants.IDENTIFIER);
    BoundStatement boundStatement = statement.bind(array);
    connectionManager.getSession(keyspaceName).execute(boundStatement);
    response.put(Constants.RESPONSE, Constants.SUCCESS);
  } catch (Exception e) {
    e.printStackTrace();
    if (e.getMessage().contains(JsonKey.UNKNOWN_IDENTIFIER)) {
      ProjectLogger.log(
          Constants.EXCEPTION_MSG_UPDATE + tableName + " : " + e.getMessage(),
          e,
          LoggerEnum.ERROR.name());
      throw new ProjectCommonException(
          ResponseCode.invalidPropertyError.getErrorCode(),
          CassandraUtil.processExceptionForUnknownIdentifier(e),
          ResponseCode.CLIENT_ERROR.getResponseCode());
    }
    ProjectLogger.log(
        Constants.EXCEPTION_MSG_UPDATE + tableName + " : " + e.getMessage(),
        e,
        LoggerEnum.ERROR.name());
    throw new ProjectCommonException(
        ResponseCode.dbUpdateError.getErrorCode(),
        ResponseCode.dbUpdateError.getErrorMessage(),
        ResponseCode.SERVER_ERROR.getResponseCode());
  }
  logQueryElapseTime("updateRecord", startTime);
  return response;
}
 
Example 19
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void populateActiveAgentTable(int rollupLevel) throws Exception {
    logger.info("populating active_agent_rollup_{} table - this could take"
            + " several minutes on large data sets...", rollupLevel);
    dropTableIfExists("active_agent_rollup_" + rollupLevel);
    int expirationHours =
            getCentralStorageConfig(session).rollupExpirationHours().get(rollupLevel);
    session.createTableWithTWCS("create table if not exists active_agent_rollup_" + rollupLevel
            + " (one int, capture_time timestamp, agent_id varchar, primary key (one,"
            + " capture_time, agent_id))", expirationHours);
    PreparedStatement insertPS = session.prepare("insert into active_agent_rollup_"
            + rollupLevel + " (one, capture_time, agent_id) values (1, ?, ?) using ttl ?");
    int ttl = Ints.saturatedCast(HOURS.toSeconds(expirationHours));
    long rollupIntervalMillis;
    if (rollupLevel < 3) {
        rollupIntervalMillis =
                RollupConfig.buildRollupConfigs().get(rollupLevel + 1).intervalMillis();
    } else {
        rollupIntervalMillis = DAYS.toMillis(1);
    }
    int[] negativeOffsets = new int[(int) (DAYS.toMillis(1) / rollupIntervalMillis)];
    for (int i = 0; i < negativeOffsets.length; i++) {
        negativeOffsets[i] = (int) (rollupIntervalMillis * (i + 1 - negativeOffsets.length));
    }
    PreparedStatement readPS = session.prepare(
            "select capture_time, agent_id from agent where one = 1 and capture_time > ?");
    BoundStatement boundStatement = readPS.bind();
    long now = clock.currentTimeMillis();
    boundStatement.setTimestamp(0, new Date(now - HOURS.toMillis(expirationHours)));
    ResultSet results = session.read(boundStatement);
    Queue<ListenableFuture<?>> futures = new ArrayDeque<>();
    for (Row row : results) {
        Date captureDate = checkNotNull(row.getTimestamp(0));
        String agentId = row.getString(1);
        for (int negativeOffset : negativeOffsets) {
            long offsetCaptureTime = captureDate.getTime() + negativeOffset;
            int adjustedTTL = Common.getAdjustedTTL(ttl, offsetCaptureTime, clock);
            boundStatement = insertPS.bind();
            boundStatement.setTimestamp(0, new Date(offsetCaptureTime));
            boundStatement.setString(1, agentId);
            boundStatement.setInt(2, adjustedTTL);
            futures.add(session.writeAsync(boundStatement));
            waitForSome(futures);
            if (offsetCaptureTime > now) {
                break;
            }
        }
    }
    MoreFutures.waitForAll(futures);
    logger.info("populating active_agent_rollup_{} table - complete", rollupLevel);
}
 
Example 20
Source File: DataAccessImpl.java    From hawkular-metrics with Apache License 2.0
private BoundStatement bindDataPoint(PreparedStatement statement, Metric<?> metric, Object value,
        Map<String, String> tags, long timestamp) {
    MetricId<?> metricId = metric.getMetricId();
    return statement.bind(value, tags, metricId.getTenantId(), metricId.getType().getCode(),
            metricId.getName(), DPART, getTimeUUID(timestamp));
}