Java Code Examples for com.datastax.driver.core.Row#getString()

The following examples show how to use com.datastax.driver.core.Row#getString(). They are taken from open source projects; you can go to the original project or source file by following the links above each example.
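For orientation, here is a minimal, self-contained sketch of the basic pattern before the project examples. The contact point, keyspace, and users table are hypothetical; getString() can be called by column name or by zero-based position, and it returns null when the column value is NULL.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class RowGetStringExample {
    public static void main(String[] args) {
        // Contact point and keyspace are assumptions for illustration only.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect("demo_keyspace")) {
            ResultSet rs = session.execute("SELECT id, name FROM users");
            for (Row row : rs) {
                // Access by column name or by zero-based index.
                String byName = row.getString("name");
                String byIndex = row.getString(1);
                System.out.println(byName + " / " + byIndex);
            }
        }
    }
}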
Example 1
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void updateRoles() throws Exception {
    PreparedStatement insertPS =
            session.prepare("insert into role (name, permissions) values (?, ?)");
    ResultSet results = session.read("select name, permissions from role");
    for (Row row : results) {
        String name = row.getString(0);
        Set<String> permissions = row.getSet(1, String.class);
        Set<String> upgradedPermissions = upgradePermissions(permissions);
        if (upgradedPermissions == null) {
            continue;
        }
        BoundStatement boundStatement = insertPS.bind();
        boundStatement.setString(0, name);
        boundStatement.setSet(1, upgradedPermissions, String.class);
        session.write(boundStatement);
    }
}
 
Example 2
Source File: UserDAO.java    From hawkular-apm with Apache License 2.0
public User getUser(String id) {
    // Statement statement = QueryBuilder.select()
    //         .from(KEYSPACE, TABLE)
    //         .where(QueryBuilder.eq("id", id))
    //         .enableTracing();

    BoundStatement boundStatement = session.prepare("SELECT * FROM " + KEYSPACE + "." + TABLE +
            " WHERE id = ?").bind(id);

    ResultSet resultSet = executeWithClientSpan(boundStatement);

    User user = null;
    for (Row row : resultSet) {
        user = new User(row.getString("id"), row.getString("name"));
    }

    return user;
}
 
Example 3
Source File: CQLService.java    From Doradus with Apache License 2.0
@Override
public List<String> getRows(String storeName, String continuationToken, int count) {
    String tableName = storeToCQLName(storeName);
    Set<String> rows = new HashSet<String>();
    //unfortunately I don't know how to get one record per row in CQL so we'll read everything
    //and find out the rows
    ResultSet rs = executeQuery(Query.SELECT_ROWS_RANGE, tableName);
    while(true) {
        Row r = rs.one();
        if(r == null) break;
        String key = r.getString("key");
        if(continuationToken != null && continuationToken.compareTo(key) >= 0) {
            continue;
        }
        rows.add(key);
    }
    List<String> result = new ArrayList<>(rows);
    Collections.sort(result);
    if(result.size() > count) {
        result = new ArrayList<>(result.subList(0, count));
    }
    return result;
}
 
Example 4
Source File: IrisRealm.java    From arcusplatform with Apache License 2.0
private String[] getPasswordForUser(com.datastax.driver.core.Session cassandraSession, String username) throws SQLException {
   String[] result;
   boolean returningSeparatedSalt = false;
   switch (saltStyle) {
      case NO_SALT:
      case CRYPT:
      case EXTERNAL:
         result = new String[1];
         break;
      default:
         result = new String[2];
         returningSeparatedSalt = true;
   }

   ParsedEmail parsedEmail = ParsedEmail.parse(username);
   BoundStatement boundStatement = new BoundStatement(preparedAuthenticationQuery);
   Row row = cassandraSession.execute(boundStatement.bind(parsedEmail.getDomain(), parsedEmail.getUser_0_3(), parsedEmail.getUser())).one();

   if (row == null) {
      return result;
   }

   result[0] = row.getString("password");
   if (returningSeparatedSalt) {
      result[1] = row.getString("password_salt");
   }

   return result;
}
 
Example 5
Source File: CassandraIO.java    From beam with Apache License 2.0
/**
 * Gets the list of token ranges that a table occupies on a given Cassandra node.
 *
 * <p>NB: This method is compatible with Cassandra 2.1.5 and greater.
 */
private static List<TokenRange> getTokenRanges(Cluster cluster, String keyspace, String table) {
  try (Session session = cluster.newSession()) {
    ResultSet resultSet =
        session.execute(
            "SELECT range_start, range_end, partitions_count, mean_partition_size FROM "
                + "system.size_estimates WHERE keyspace_name = ? AND table_name = ?",
            keyspace,
            table);

    ArrayList<TokenRange> tokenRanges = new ArrayList<>();
    for (Row row : resultSet) {
      TokenRange tokenRange =
          new TokenRange(
              row.getLong("partitions_count"),
              row.getLong("mean_partition_size"),
              new BigInteger(row.getString("range_start")),
              new BigInteger(row.getString("range_end")));
      tokenRanges.add(tokenRange);
    }
    // The table may not contain the estimates yet
    // or have partitions_count and mean_partition_size fields = 0
    // if the data was just inserted and the amount of data in the table was small.
    // This is very common situation during tests,
    // when we insert a few rows and immediately query them.
    // However, for tiny data sets the lack of size estimates is not a problem at all,
    // because we don't want to split tiny data anyways.
    // Therefore, we're not issuing a warning if the result set was empty
    // or mean_partition_size and partitions_count = 0.
    return tokenRanges;
  }
}
 
Example 6
Source File: BookRepository.java    From tutorials with MIT License
/**
 * Selects all books from the books table.
 *
 * @return list of all books
 */
public List<Book> selectAll() {
    StringBuilder sb = new StringBuilder("SELECT * FROM ").append(TABLE_NAME);

    final String query = sb.toString();
    ResultSet rs = session.execute(query);

    List<Book> books = new ArrayList<Book>();

    for (Row r : rs) {
        Book book = new Book(r.getUUID("id"), r.getString("title"), r.getString("author"), r.getString("subject"));
        books.add(book);
    }
    return books;
}
 
Example 7
Source File: EventTranslator.java    From concursus with MIT License
@Override
public void processRow(Row row) throws DriverException {
    String aggregateType = row.getString(AGGREGATE_TYPE);
    String name = row.getString(EVENT_NAME);
    String version = row.getString(EVENT_VERSION);

    VersionedName versionedName = VersionedName.of(name, version);
    EventType eventType = EventType.of(aggregateType, versionedName);

    matcher.match(eventType).ifPresent(tupleSchema -> {
        createEvent(row, aggregateType, versionedName, tupleSchema);
    });
}
 
Example 8
Source File: CassandraMailboxPathDAOImpl.java    From james-project with Apache License 2.0
private CassandraIdAndPath fromRowToCassandraIdAndPath(Row row) {
    return new CassandraIdAndPath(
        CassandraId.of(row.getUUID(MAILBOX_ID)),
        new MailboxPath(row.getUDTValue(NAMESPACE_AND_USER).getString(CassandraMailboxTable.MailboxBase.NAMESPACE),
            Username.of(row.getUDTValue(NAMESPACE_AND_USER).getString(CassandraMailboxTable.MailboxBase.USER)),
            row.getString(MAILBOX_NAME)));
}
 
Example 9
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void populateAgentDisplayTable() throws Exception {
    dropTableIfExists("agent_display");
    session.createTableWithLCS("create table if not exists agent_display (agent_rollup_id"
            + " varchar, display varchar, primary key (agent_rollup_id))");
    PreparedStatement insertPS = session
            .prepare("insert into agent_display (agent_rollup_id, display) values (?, ?)");
    ResultSet results = session.read("select agent_rollup_id, config from agent_config");
    Queue<ListenableFuture<?>> futures = new ArrayDeque<>();
    for (Row row : results) {
        String agentRollupId = row.getString(0);
        AgentConfig agentConfig;
        try {
            agentConfig = AgentConfig.parseFrom(checkNotNull(row.getBytes(1)));
        } catch (InvalidProtocolBufferException e) {
            logger.error(e.getMessage(), e);
            continue;
        }
        String display = agentConfig.getGeneralConfig().getDisplay();
        if (!display.isEmpty()) {
            BoundStatement boundStatement = insertPS.bind();
            int i = 0;
            boundStatement.setString(i++, agentRollupId);
            boundStatement.setString(i++, display);
            futures.add(session.writeAsync(boundStatement));
            waitForSome(futures);
        }
    }
    MoreFutures.waitForAll(futures);
}
 
Example 10
Source File: CqlMetaDaoImpl.java    From staash with Apache License 2.0
public  Map<String,JsonObject> LoadStorage() {
    ResultSet rs = session
            .execute("select column1, value from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='"+MetaConstants.STAASH_STORAGE_TYPE_ENTITY+"';");
    List<Row> rows = rs.all();
    Map<String,JsonObject> storageMap = new HashMap<String,JsonObject>();
    for (Row row : rows) {
        String field = row.getString(0);
        JsonObject val = new JsonObject(row.getString(1));
        jsonStorage.putObject(field, val);
        storageMap.put(field, val);
    }
    return storageMap;
}
 
Example 11
Source File: CassandraStorage.java    From cassandra-reaper with Apache License 2.0
private static DiagEventSubscription createDiagEventSubscription(Row row) {
  return new DiagEventSubscription(
      Optional.of(row.getUUID("id")),
      row.getString("cluster"),
      Optional.of(row.getString("description")),
      row.getSet("nodes", String.class),
      row.getSet("events", String.class),
      row.getBool("export_sse"),
      row.getString("export_file_logger"),
      row.getString("export_http_endpoint"));
}
 
Example 12
Source File: BaseMetricsITest.java    From hawkular-metrics with Apache License 2.0
protected void assertMetricsTagsIndexMatches(String tenantId, String tag, List<MetricsTagsIndexEntry> expected)
    throws Exception {
    List<Row> rows = dataAccess.findMetricsByTagName(tenantId, tag).toList().toBlocking().first();
    List<MetricsTagsIndexEntry> actual = new ArrayList<>();

    for (Row row : rows) {
        MetricType<?> type = MetricType.fromCode(row.getByte(1));
        MetricId<?> id = new MetricId<>(tenantId, type, row.getString(2));
        actual.add(new MetricsTagsIndexEntry(row.getString(3), id)); // Need value here.. pff.
    }

    assertEquals(actual, expected, "The metrics tags index entries do not match");
}
 
Example 13
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void rewriteEnvironmentTablePart2() throws Exception {
    if (!tableExists("environment_temp")) {
        // previously failed mid-upgrade prior to updating schema version
        return;
    }
    dropTableIfExists("environment");
    session.createTableWithLCS("create table if not exists environment (agent_id varchar,"
            + " environment blob, primary key (agent_id))");
    PreparedStatement insertPS = session
            .prepare("insert into environment (agent_id, environment) values (?, ?)");
    Map<String, V09AgentRollup> v09AgentRollups = getV09AgentRollupsFromAgentRollupTable();
    ResultSet results = session.read("select agent_id, environment from environment_temp");
    for (Row row : results) {
        String v09AgentRollupId = row.getString(0);
        V09AgentRollup v09AgentRollup = v09AgentRollups.get(v09AgentRollupId);
        if (v09AgentRollup == null) {
            // v09AgentRollupId was manually deleted (via the UI) from the agent_rollup
            // table in which case its parent is no longer known and best to ignore
            continue;
        }
        BoundStatement boundStatement = insertPS.bind();
        boundStatement.setString(0, v09AgentRollup.agentRollupId());
        boundStatement.setBytes(1, row.getBytes(1));
        session.write(boundStatement);
    }
    dropTableIfExists("environment_temp");
}
 
Example 14
Source File: HadoopFormatIOCassandraTest.java    From beam with Apache License 2.0
@Override
public String apply(Row input) {
  return input.getInt("id") + "|" + input.getString("scientist");
}
 
Example 15
Source File: CassandraTweetRepository.java    From twissandra-j with Apache License 2.0
public Tweet getTweet(UUID id) {
	Row row = getOneRow(execute("SELECT username, body FROM tweets WHERE tweetid = %s", id.toString()));
	return row != null ? new Tweet(row.getString("username"), row.getString("body"), id, fromUUID1(id)) : null;
}
 
Example 16
Source File: CassandraLoader.java    From swblocks-decisiontree with Apache License 2.0
private Audit getAuditForChange(final Row row) {
    return new Audit(row.getString("initiator"),
            row.get("initiatortime", Instant.class), row.getString("approver"),
            row.get("approvertime", Instant.class));
}
 
Example 17
Source File: EnqueuedMailsDaoUtil.java    From james-project with Apache License 2.0
static EnqueuedItemWithSlicingContext toEnqueuedMail(Row row, BlobId.Factory blobFactory) {
    MailQueueName queueName = MailQueueName.fromString(row.getString(QUEUE_NAME));
    EnqueueId enqueueId = EnqueueId.of(row.getUUID(ENQUEUE_ID));
    Instant timeRangeStart = row.getTimestamp(TIME_RANGE_START).toInstant();
    BucketedSlices.BucketId bucketId = BucketedSlices.BucketId.of(row.getInt(BUCKET_ID));
    Instant enqueuedTime = row.getTimestamp(ENQUEUED_TIME).toInstant();
    BlobId headerBlobId = blobFactory.from(row.getString(HEADER_BLOB_ID));
    BlobId bodyBlobId = blobFactory.from(row.getString(BODY_BLOB_ID));
    MimeMessagePartsId mimeMessagePartsId = MimeMessagePartsId
        .builder()
        .headerBlobId(headerBlobId)
        .bodyBlobId(bodyBlobId)
        .build();

    MailAddress sender = Optional.ofNullable(row.getString(SENDER))
        .map(Throwing.function(MailAddress::new))
        .orElse(null);
    List<MailAddress> recipients = row.getList(RECIPIENTS, String.class)
        .stream()
        .map(Throwing.function(MailAddress::new))
        .collect(ImmutableList.toImmutableList());
    String state = row.getString(STATE);
    String remoteAddr = row.getString(REMOTE_ADDR);
    String remoteHost = row.getString(REMOTE_HOST);
    String errorMessage = row.getString(ERROR_MESSAGE);
    String name = row.getString(NAME);
    Date lastUpdated = row.getTimestamp(LAST_UPDATED);
    Map<String, ByteBuffer> rawAttributes = row.getMap(ATTRIBUTES, String.class, ByteBuffer.class);
    PerRecipientHeaders perRecipientHeaders = fromList(row.getList(PER_RECIPIENT_SPECIFIC_HEADERS, TupleValue.class));

    MailImpl mail = MailImpl.builder()
        .name(name)
        .sender(sender)
        .addRecipients(recipients)
        .lastUpdated(lastUpdated)
        .errorMessage(errorMessage)
        .remoteHost(remoteHost)
        .remoteAddr(remoteAddr)
        .state(state)
        .addAllHeadersForRecipients(perRecipientHeaders)
        .addAttributes(toAttributes(rawAttributes))
        .build();
    EnqueuedItem enqueuedItem = EnqueuedItem.builder()
        .enqueueId(enqueueId)
        .mailQueueName(queueName)
        .mail(mail)
        .enqueuedTime(enqueuedTime)
        .mimeMessagePartsId(mimeMessagePartsId)
        .build();

    return EnqueuedItemWithSlicingContext.builder()
        .enqueuedItem(enqueuedItem)
        .slicingContext(EnqueuedItemWithSlicingContext.SlicingContext.of(bucketId, timeRangeStart))
        .build();
}
 
Example 18
Source File: GuicedCassandraSessionDAO.java    From arcusplatform with Apache License 2.0
private Session hydrateSession(UUID id, Row row) {
    UUID rowId = row.getUUID(Columns.ID);
    if (id.equals(rowId)) {
        Date start = row.getTimestamp(Columns.START);
        // If this is null, then the row is a tombstone.
        if (start != null) {
            ByteBuffer buffer = row.getBytes(Columns.SERIALIZED);
            // If the buffer has anything, then it is an old style serialized session.
            if (buffer != null && buffer.remaining() > 0) {
                byte[] bytes = new byte[buffer.remaining()];
                buffer.get(bytes);
                return serializer.deserialize(bytes);
            } else {
                // New style session. Read the fields and create a session.
                Date stop = row.getTimestamp(Columns.STOP);
                Date lastAccess = row.getTimestamp(Columns.LAST_ACCESS);
                long timeout = row.getLong(Columns.TIMEOUT);
                boolean expired = row.getBool(Columns.EXPIRED);
                String host = row.getString(Columns.HOST);

                // Read the attributes
                Map<String, String> serialized_attrs = row.getMap(Columns.ATTRIBUTES, String.class, String.class);
                Map<Object, Object> attributes = new HashMap<>();
                for (Map.Entry<String, String> entry : serialized_attrs.entrySet()) {
                    String json = entry.getValue();
                    if (json != null && !json.isEmpty()) {
                        attributes.put(entry.getKey(), deserializeAttribute(entry.getKey(), json));
                    }
                }

                // Create and populate the session.
                SimpleSession session = new SimpleSession();
                session.setId(rowId);
                session.setStartTimestamp(start);
                session.setStopTimestamp(stop);
                session.setLastAccessTime(lastAccess);
                session.setTimeout(timeout);
                session.setExpired(expired);
                session.setHost(host);
                session.setAttributes(attributes);

                return session;
            }
        }
    }
    return null;
}
 
Example 19
Source File: DropCompleter.java    From Karaf-Cassandra with Apache License 2.0
public int complete(Session session, CommandLine commandLine,
		List<String> candidates) {
	if (session != null) {

		com.datastax.driver.core.Session cassandraSession = (com.datastax.driver.core.Session) session
				.get(SessionParameter.CASSANDRA_SESSION);

		if (cassandraSession == null) {
			System.err
					.println("No active session found--run the connect command first");
			return 0;
		}

		if (commandLine instanceof ArgumentCommandLine) {
			delegate.getStrings().add(commandLine.getCursorArgument());

		} else {
			/*
			 * DROP ( KEYSPACE | SCHEMA ) IF EXISTS keyspace_name
			 */

			/*
			 * DROP TABLE IF EXISTS keyspace_name.table_name
			 */

			List<String> arguments = Arrays.asList(commandLine
					.getArguments());
			int cursorArgumentIndex = commandLine.getCursorArgumentIndex();
			String validArgument; 
			if (cursorArgumentIndex > 1)
				validArgument = arguments.get(cursorArgumentIndex -1);
			else
				validArgument = arguments.get(0);
			
			if (cursorArgumentIndex <= 1) {
				delegate.getStrings().add("KEYSPACE");
				delegate.getStrings().add("SCHEMA");
				// alternative drop a table
				delegate.getStrings().add("TABLE");
			} else if (cursorArgumentIndex >= 2) {
				delegate.getStrings().clear();
				if (cursorArgumentIndex == 2)
					delegate.getStrings().add("IF EXISTS");

				// tables are needed.
				if (validArgument.equalsIgnoreCase("KEYSPACE")
						|| validArgument.equalsIgnoreCase("SCHEMA")
						|| cassandraSession.getLoggedKeyspace() == null) {
					CompleterCommons.completeKeySpace(delegate,
							cassandraSession);
				} else {
					String keyspace = cassandraSession.getLoggedKeyspace();
					ResultSet execute = cassandraSession
							.execute(String
									.format("select columnfamily_name from system.schema_columnfamilies where keyspace_name = '%s';",
											keyspace));
					for (Row row : execute) {
						String table = row.getString("columnfamily_name");
						delegate.getStrings().add(table);
					}
				}
			} 
		}
	}
	return delegate.complete(session, commandLine, candidates);

}
 
Example 20
Source File: SchemaUpgrade.java    From glowroot with Apache License 2.0
private void populateActiveAgentTable(int rollupLevel) throws Exception {
    logger.info("populating active_agent_rollup_{} table - this could take"
            + " several minutes on large data sets...", rollupLevel);
    dropTableIfExists("active_agent_rollup_" + rollupLevel);
    int expirationHours =
            getCentralStorageConfig(session).rollupExpirationHours().get(rollupLevel);
    session.createTableWithTWCS("create table if not exists active_agent_rollup_" + rollupLevel
            + " (one int, capture_time timestamp, agent_id varchar, primary key (one,"
            + " capture_time, agent_id))", expirationHours);
    PreparedStatement insertPS = session.prepare("insert into active_agent_rollup_"
            + rollupLevel + " (one, capture_time, agent_id) values (1, ?, ?) using ttl ?");
    int ttl = Ints.saturatedCast(HOURS.toSeconds(expirationHours));
    long rollupIntervalMillis;
    if (rollupLevel < 3) {
        rollupIntervalMillis =
                RollupConfig.buildRollupConfigs().get(rollupLevel + 1).intervalMillis();
    } else {
        rollupIntervalMillis = DAYS.toMillis(1);
    }
    int[] negativeOffsets = new int[(int) (DAYS.toMillis(1) / rollupIntervalMillis)];
    for (int i = 0; i < negativeOffsets.length; i++) {
        negativeOffsets[i] = (int) (rollupIntervalMillis * (i + 1 - negativeOffsets.length));
    }
    PreparedStatement readPS = session.prepare(
            "select capture_time, agent_id from agent where one = 1 and capture_time > ?");
    BoundStatement boundStatement = readPS.bind();
    long now = clock.currentTimeMillis();
    boundStatement.setTimestamp(0, new Date(now - HOURS.toMillis(expirationHours)));
    ResultSet results = session.read(boundStatement);
    Queue<ListenableFuture<?>> futures = new ArrayDeque<>();
    for (Row row : results) {
        Date captureDate = checkNotNull(row.getTimestamp(0));
        String agentId = row.getString(1);
        for (int negativeOffset : negativeOffsets) {
            long offsetCaptureTime = captureDate.getTime() + negativeOffset;
            int adjustedTTL = Common.getAdjustedTTL(ttl, offsetCaptureTime, clock);
            boundStatement = insertPS.bind();
            boundStatement.setTimestamp(0, new Date(offsetCaptureTime));
            boundStatement.setString(1, agentId);
            boundStatement.setInt(2, adjustedTTL);
            futures.add(session.writeAsync(boundStatement));
            waitForSome(futures);
            if (offsetCaptureTime > now) {
                break;
            }
        }
    }
    MoreFutures.waitForAll(futures);
    logger.info("populating active_agent_rollup_{} table - complete", rollupLevel);
}