Java Code Examples for com.datastax.driver.core.ResultSet#isFullyFetched()

The following examples show how to use com.datastax.driver.core.ResultSet#isFullyFetched(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: CustomerEnrichedInfoCassandraRepo.java    From examples with Apache License 2.0 5 votes vote down vote up
/**
 * Loads every customer record from the configured Cassandra table into
 * {@code customerInfoArray}.
 *
 * <p>Fixes over the previous version:
 * <ul>
 *   <li>The old loop condition {@code !rs.isFullyFetched() && rowIter.hasNext()}
 *       stopped as soon as the driver had fetched the last page, even though
 *       unconsumed rows remained — the final page was silently dropped.
 *       Iterating the {@link ResultSet} directly pages transparently and
 *       visits every row.</li>
 *   <li>A fresh {@code nameValueMap} is created per row; the old code reused
 *       one map, so every {@code SingleRecord} holding a reference to it ended
 *       up seeing only the last row's values.</li>
 *   <li>The {@link Cluster} is now closed as well — closing only the session
 *       leaked the cluster's connection pool.</li>
 * </ul>
 *
 * @throws ClassNotFoundException kept for interface compatibility with callers
 * @throws SQLException kept for interface compatibility with callers
 */
protected void load() throws ClassNotFoundException, SQLException
{
  Cluster cluster = Cluster.builder().addContactPoint(dataWarehouseConfig.getHost()).build();
  Session session = cluster.connect(dataWarehouseConfig.getDatabase());

  List<SingleRecord> customerInfoList = Lists.newArrayList();

  try {
    ResultSet rs = session.execute("select id, imsi, isdn, imei, operatorName, operatorCode, deviceBrand, deviceModel from "
        + dataWarehouseConfig.getDatabase() + "." + dataWarehouseConfig.getTableName());

    // The enhanced-for iterates ALL rows; the driver fetches further pages
    // transparently, so no manual isFullyFetched() bookkeeping is needed.
    for (Row row : rs) {
      // One map per row — see the javadoc note on the aliasing bug.
      Map<String, String> nameValueMap = new HashMap<String, String>();
      nameValueMap.put("id", row.getString(0));
      nameValueMap.put("imsi", row.getString(1));
      nameValueMap.put("isdn", row.getString(2));
      nameValueMap.put("imei", row.getString(3));
      nameValueMap.put("operatorName", row.getString(4));
      nameValueMap.put("operatorCode", row.getString(5));
      nameValueMap.put("deviceBrand", row.getString(6));
      nameValueMap.put("deviceModel", row.getString(7));

      customerInfoList.add(new SingleRecord(nameValueMap));
    }

  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    if (session != null) {
      session.close();
    }
    if (cluster != null) {
      cluster.close();
    }
  }

  customerInfoArray = customerInfoList.toArray(new SingleRecord[0]);
}
 
Example 2
Source File: TestBaselineApproach.java    From bboxdb with Apache License 2.0 5 votes vote down vote up
/**
 * Execute a range query: scan the whole table and count how many tuples
 * have a bounding box that intersects the given query range.
 *
 * @param destTable the table to scan
 * @param format the tuple format used to decode each row into a {@link Tuple}
 * @param range the query range to test against
 */
public void executeRangeQuery(final String destTable, final String format, final Hyperrectangle range) {
	System.out.println("# Execute range query in range " + range);

	final Stopwatch queryWatch = Stopwatch.createStarted();
	final TupleBuilder builder = TupleBuilderFactory.getBuilderForFormat(format);

	long scannedTuples = 0;
	long matchingTuples = 0;

	final SimpleStatement scanStatement = new SimpleStatement("SELECT * FROM " + destTable);
	scanStatement.setFetchSize(2000); // Use 2000 tuples per page

	final ResultSet resultSet = session.execute(scanStatement);

	for (final Row currentRow : resultSet) {

		// Kick off an asynchronous prefetch of the next page as soon as only
		// 100 locally buffered rows remain, so iteration never stalls on I/O.
		if (resultSet.getAvailableWithoutFetching() == 100 && !resultSet.isFullyFetched()) {
			resultSet.fetchMoreResults(); // this is asynchronous
		}

		scannedTuples++;

		final long rowId = currentRow.getLong(0);
		final String rowText = currentRow.getString(1);

		final Tuple decodedTuple = builder.buildTuple(rowText, Long.toString(rowId));

		if (decodedTuple.getBoundingBox().intersects(range)) {
			matchingTuples++;
		}
	}

	System.out.println("# Read records " + scannedTuples + " result records " + matchingTuples);
	System.out.println("# Execution time in sec " + queryWatch.elapsed(TimeUnit.SECONDS));
}
 
Example 3
Source File: QueryCassandra.java    From localization_nifi with Apache License 2.0 4 votes vote down vote up
/**
 * Converts a result set into an Avro record and writes it to the given stream.
 *
 * @param rs        The result set to convert
 * @param outStream The stream to which the Avro record will be written
 * @param timeout   The max number of timeUnits to wait for a result set fetch to complete
 * @param timeUnit  The unit of time (SECONDS, e.g.) associated with the timeout amount
 * @return The number of rows from the result set written to the stream
 * @throws IOException          If the Avro record cannot be written
 * @throws InterruptedException If a result set fetch is interrupted
 * @throws TimeoutException     If a result set fetch has taken longer than the specified timeout
 * @throws ExecutionException   If any error occurs during the result set fetch
 */
public static long convertToAvroStream(final ResultSet rs, final OutputStream outStream,
                                       long timeout, TimeUnit timeUnit)
        throws IOException, InterruptedException, TimeoutException, ExecutionException {

    final Schema schema = createSchema(rs);
    // Single record instance reused for every row; its field values are
    // overwritten per row before each append.
    final GenericRecord rec = new GenericData.Record(schema);

    final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    try (final DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
        dataFileWriter.create(schema, outStream);

        final ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
        long nrOfRows = 0;
        if (columnDefinitions != null) {
            // One outer iteration per "page" of results: fetch (with the
            // optional timeout) if nothing is locally buffered, then drain.
            do {

                // Grab the ones we have
                int rowsAvailableWithoutFetching = rs.getAvailableWithoutFetching();
                if (rowsAvailableWithoutFetching == 0) {
                    // Get more
                    if (timeout <= 0 || timeUnit == null) {
                        // No timeout configured: block until the fetch completes.
                        rs.fetchMoreResults().get();
                    } else {
                        rs.fetchMoreResults().get(timeout, timeUnit);
                    }
                }

                // NOTE(review): this for-each drains the ResultSet and may itself
                // trigger further synchronous page fetches once the buffered rows
                // are consumed, bypassing the timeout applied above — confirm
                // against the driver's transparent-paging behavior.
                for (Row row : rs) {

                    for (int i = 0; i < columnDefinitions.size(); i++) {
                        final DataType dataType = columnDefinitions.getType(i);

                        // Null columns must be written explicitly as null,
                        // otherwise the reused record keeps the previous row's value.
                        if (row.isNull(i)) {
                            rec.put(i, null);
                        } else {
                            rec.put(i, getCassandraObject(row, i, dataType));
                        }
                    }
                    dataFileWriter.append(rec);
                    nrOfRows += 1;

                }
            } while (!rs.isFullyFetched());
        }
        return nrOfRows;
    }
}
 
Example 4
Source File: TestBaselineApproach.java    From bboxdb with Apache License 2.0 4 votes vote down vote up
/**
 * Execute a join (nested loop join): every tuple of table1 whose bounding
 * box intersects the query range is compared against every tuple of table2.
 *
 * @param table1 the outer table of the nested loop join
 * @param table2 the inner table of the nested loop join
 * @param format1 the tuple format used to decode rows of table1
 * @param format2 the tuple format used to decode rows of table2
 * @param range the query range; outer tuples outside it are skipped
 * @param padding amount by which each inner tuple's bounding box is enlarged
 *                before the intersection test
 */
public void executeJoin(final String table1, final String table2, final String format1,
		final String format2, final Hyperrectangle range, final double padding) {

	System.out.println("# Execute join query in range " + range);

	final Stopwatch stopwatch = Stopwatch.createStarted();

	final TupleBuilder tupleBuilder1 = TupleBuilderFactory.getBuilderForFormat(format1);
    final TupleBuilder tupleBuilder2 = TupleBuilderFactory.getBuilderForFormat(format2);

	long readRecords = 0;
	long resultRecords = 0;

	final SimpleStatement statementTable1 = new SimpleStatement("SELECT * FROM " + table1);
	statementTable1.setFetchSize(2000); // Use 2000 tuples per page

	final ResultSet rs1 = session.execute(statementTable1);

	for (final Row row1 : rs1) {

		// Request next page
	    if (rs1.getAvailableWithoutFetching() == 100 && !rs1.isFullyFetched()) {
	        rs1.fetchMoreResults(); // this is asynchronous
	    }

	    readRecords++;

	    final long id1 = row1.getLong(0);
	    final String text1 = row1.getString(1);

	    final Tuple tuple1 = tupleBuilder1.buildTuple(text1, Long.toString(id1));

	    // Tuple is outside of our query range
	    if(! tuple1.getBoundingBox().intersects(range)) {
	    	continue;
	    }

		// Perform the nested loop join
	    final SimpleStatement statementTable2 = new SimpleStatement("SELECT * FROM " + table2);
		statementTable2.setFetchSize(2000); // Use 2000 tuples per page

		final ResultSet rs2 = session.execute(statementTable2);

		for (final Row row2 : rs2) {
			// Request next page.
			// BUGFIX: this prefetch check previously inspected and fetched on
			// rs1 (the outer result set) — a copy-paste error that prevented
			// the inner scan from ever prefetching. It must operate on rs2.
		    if (rs2.getAvailableWithoutFetching() == 100 && !rs2.isFullyFetched()) {
		        rs2.fetchMoreResults(); // this is asynchronous
		    }

		    readRecords++;

		    final long id2 = row2.getLong(0);
		    final String text2 = row2.getString(1);

		    final Tuple tuple2 = tupleBuilder2.buildTuple(text2, Long.toString(id2));

		    if(tuple1.getBoundingBox().intersects(tuple2.getBoundingBox().enlargeByAmount(padding))) {
		    	resultRecords++;
		    }
		}
	}

	System.out.println("# Read records " + readRecords + " result records " + resultRecords);
	System.out.println("# Execution time in sec " + stopwatch.elapsed(TimeUnit.SECONDS));
}
 
Example 5
Source File: CassandraUtils.java    From james-project with Apache License 2.0 4 votes vote down vote up
/**
 * Decides whether the next page of the result set should be fetched in
 * advance: true when the number of locally buffered rows has dropped to the
 * configured prefetch threshold and more pages remain on the server.
 *
 * @param resultSet the result set being consumed
 * @return true if a prefetch of the next page is warranted
 */
private boolean fetchNeeded(ResultSet resultSet) {
    int bufferedRows = resultSet.getAvailableWithoutFetching();
    boolean atThreshold = bufferedRows == cassandraConfiguration.getFetchNextPageInAdvanceRow();
    return atThreshold && !resultSet.isFullyFetched();
}
 
Example 6
Source File: QueryCassandra.java    From nifi with Apache License 2.0 4 votes vote down vote up
/**
 * Converts a result set into an Avro record and writes it to the given stream.
 *
 * @param rs        The result set to convert
 * @param outStream The stream to which the Avro record will be written
 * @param timeout   The max number of timeUnits to wait for a result set fetch to complete
 * @param timeUnit  The unit of time (SECONDS, e.g.) associated with the timeout amount
 * @return The number of rows from the result set written to the stream
 * @throws IOException          If the Avro record cannot be written
 * @throws InterruptedException If a result set fetch is interrupted
 * @throws TimeoutException     If a result set fetch has taken longer than the specified timeout
 * @throws ExecutionException   If any error occurs during the result set fetch
 */
public static long convertToAvroStream(final ResultSet rs, final OutputStream outStream,
                                       long timeout, TimeUnit timeUnit)
        throws IOException, InterruptedException, TimeoutException, ExecutionException {

    final Schema schema = createSchema(rs);
    // Single record instance reused for every row; its field values are
    // overwritten per row before each append.
    final GenericRecord rec = new GenericData.Record(schema);

    final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    try (final DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
        dataFileWriter.create(schema, outStream);

        final ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
        long nrOfRows = 0;
        if (columnDefinitions != null) {
            // One outer iteration per "page" of results: fetch (with the
            // optional timeout) if nothing is locally buffered, then drain.
            do {

                // Grab the ones we have
                int rowsAvailableWithoutFetching = rs.getAvailableWithoutFetching();
                if (rowsAvailableWithoutFetching == 0) {
                    // Get more
                    if (timeout <= 0 || timeUnit == null) {
                        // No timeout configured: block until the fetch completes.
                        rs.fetchMoreResults().get();
                    } else {
                        rs.fetchMoreResults().get(timeout, timeUnit);
                    }
                }

                // NOTE(review): this for-each drains the ResultSet and may itself
                // trigger further synchronous page fetches once the buffered rows
                // are consumed, bypassing the timeout applied above — confirm
                // against the driver's transparent-paging behavior.
                for (Row row : rs) {

                    for (int i = 0; i < columnDefinitions.size(); i++) {
                        final DataType dataType = columnDefinitions.getType(i);

                        // Null columns must be written explicitly as null,
                        // otherwise the reused record keeps the previous row's value.
                        if (row.isNull(i)) {
                            rec.put(i, null);
                        } else {
                            rec.put(i, getCassandraObject(row, i, dataType));
                        }
                    }
                    dataFileWriter.append(rec);
                    nrOfRows += 1;

                }
            } while (!rs.isFullyFetched());
        }
        return nrOfRows;
    }
}