com.netflix.astyanax.connectionpool.exceptions.ConnectionException Java Examples

The following examples show how to use com.netflix.astyanax.connectionpool.exceptions.ConnectionException. They are drawn from open source projects; the project, source file, and license for each example are listed above it.
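Nearly every example below follows the same basic shape: an Astyanax operation is executed, the checked ConnectionException it declares is caught, and the exception is either rethrown wrapped in an application-specific (often unchecked) exception or reported through a callback or metric. The snippet below is a minimal sketch of that shape, not taken from any of the projects that follow; the keyspace field, the CF_EXAMPLE column family, and the wrapping RuntimeException are illustrative placeholders.

// Minimal sketch (assumed context): "keyspace" is an injected com.netflix.astyanax.Keyspace,
// and CF_EXAMPLE is a ColumnFamily<String, String> defined elsewhere, e.g.
//   ColumnFamily.newColumnFamily("Example", StringSerializer.get(), StringSerializer.get());
public void writeValue(String rowKey, String columnName, String value) {
    MutationBatch batch = keyspace.prepareMutationBatch();
    batch.withRow(CF_EXAMPLE, rowKey).putColumn(columnName, value);
    try {
        // execute() declares the checked ConnectionException
        batch.execute();
    } catch (ConnectionException e) {
        // wrap so callers are not forced to depend on Astyanax's exception hierarchy
        throw new RuntimeException("Unable to write to Cassandra", e);
    }
}
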
Example #1
Source File: AstyanaxThriftClusterAdminResource.java    From staash with Apache License 2.0
@Override
public void updateColumnFamily(@PathParam("keyspace") String keyspaceName, String columnFamilyName, ColumnFamilyEntity columnFamily) throws PaasException {
    LOG.info("Updating column family: '{}.{}.{}'", new Object[] {clusterKey.getClusterName(), keyspaceName, columnFamily.getName()});
    
    columnFamily.setKeyspaceName(keyspaceName);
    columnFamily.setClusterName(clusterKey.getClusterName());
    
    try {
        Properties props = new Properties();
        props.putAll(cluster.getColumnFamilyProperties(keyspaceName, columnFamilyName));
        if (columnFamily.getOptions() != null) {
            props.putAll(columnFamily.getOptions());
        }
        props.setProperty("name",     columnFamily.getName());
        props.setProperty("keyspace", columnFamily.getKeyspaceName());

        cluster.createColumnFamily(props);
        eventBus.post(new ColumnFamilyUpdateEvent(new ColumnFamilyKey(new KeyspaceKey(clusterKey, keyspaceName), columnFamily.getName())));
    } catch (ConnectionException e) {
        throw new PaasException(String.format("Error creating column family '%s.%s' on cluster '%s'", 
                keyspaceName, columnFamily.getName(), clusterKey.getClusterName()), e);
    }
}
 
Example #2
Source File: V003.java    From mutagen-cassandra with Apache License 2.0
@Override
protected void performMutation(Context context) {
	context.debug("Executing mutation {}",state.getID());
	final ColumnFamily<String,String> CF_TEST1=
		ColumnFamily.newColumnFamily("Test1",
			StringSerializer.get(),StringSerializer.get());

	MutationBatch batch=getKeyspace().prepareMutationBatch();
	batch.withRow(CF_TEST1,"row2")
		.putColumn("value1","chicken")
		.putColumn("value2","sneeze");

	try {
		batch.execute();
	}
	catch (ConnectionException e) {
		throw new MutagenException("Could not update columnfamily Test1",e);
	}
}
 
Example #3
Source File: CassandraStorage.java    From greycat with Apache License 2.0
@Override
public void put(Buffer stream, Callback<Boolean> callback) {
    MutationBatch m = keyspace.prepareMutationBatch();
    BufferIterator it = stream.iterator();
    while (it.hasNext()) {
        Buffer keyView = it.next();
        Buffer valueView = it.next();
        if (valueView != null) {
            m.withRow(MWG, keyView.data()).putColumn(0, valueView.data());
        }
    }
    try {
        @SuppressWarnings("unused")
        OperationResult<Void> result = m.execute();
        callback.on(true);
    } catch (ConnectionException e) {
        callback.on(false);
    }
}
 
Example #4
Source File: DeltaPlacementFactory.java    From emodb with Apache License 2.0
@Override
public Placement newPlacement(String placement) throws ConnectionException {
    String[] parsed = PlacementUtil.parsePlacement(placement);
    String keyspaceName = parsed[0];
    String cfPrefix = parsed[1];

    CassandraKeyspace keyspace = _keyspaceMap.get(keyspaceName);
    if (keyspace == null) {
        throw new UnknownPlacementException(format(
                "Placement string refers to unknown or non-local Cassandra keyspace: %s", keyspaceName), placement);
    }

    KeyspaceDefinition keyspaceDef = keyspace.getAstyanaxKeyspace().describeKeyspace();
    AnnotatedCompositeSerializer<DeltaKey> deltaKeySerializer  = new AnnotatedCompositeSerializer<DeltaKey>(DeltaKey.class);

    // DDL's are not actually configurable due to the way we abstract the names from the placements here.
    // In the future, we should either phase out the DDL config or change the implementation here to conform to it.
    ColumnFamily<ByteBuffer, DeltaKey> blockedDeltaCf = getColumnFamily(keyspaceDef, cfPrefix, "delta_v2", placement, deltaKeySerializer);
    ColumnFamily<ByteBuffer, UUID> deltaHistoryCf = getColumnFamily(keyspaceDef, cfPrefix, "history", placement, TimeUUIDSerializer.get());

    // Calculate the data centers on demand since they may change in a live system.
    return new DeltaPlacement(placement, keyspace, blockedDeltaCf, deltaHistoryCf);
}
 
Example #5
Source File: InstrumentedTracerFactory.java    From emodb with Apache License 2.0
@Override
public CassandraOperationTracer newTracer(final CassandraOperationType type) {
    return new CassandraOperationTracer() {
        private long _start;

        @Override
        public CassandraOperationTracer start() {
            checkState(_start == 0);  // Verify the tracer is used in a single threaded manner.
            _start = _clock.getTick();
            return this;
        }

        @Override
        public void success() {
            _successTimers.getUnchecked(type).update(_clock.getTick() - _start, TimeUnit.NANOSECONDS);
            _start = 0;
        }

        @Override
        public void failure(ConnectionException e) {
            _failureTimers.getUnchecked(type).update(_clock.getTick() - _start, TimeUnit.NANOSECONDS);
            _start = 0;
        }
    };
}
 
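The tracer produced in Example #5 is driven by the calling code around each Cassandra operation: start() before the call, success() once it returns, and failure(e) when it throws ConnectionException. A hypothetical helper illustrating that lifecycle is sketched below; only the three CassandraOperationTracer methods come from the example above, while the traced(...) helper and its Callable-based signature are assumptions made for illustration.

// Hypothetical wrapper, not part of EmoDB: runs an operation and reports its outcome
// to the tracer created by newTracer(type) in Example #5.
private <T> T traced(CassandraOperationTracer tracer, java.util.concurrent.Callable<T> operation)
        throws ConnectionException {
    tracer.start();
    try {
        T result = operation.call();
        tracer.success();
        return result;
    } catch (ConnectionException e) {
        tracer.failure(e);
        throw e;
    } catch (Exception e) {
        // non-connection failures are rethrown unchanged in this sketch
        throw new RuntimeException(e);
    }
}
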
Example #6
Source File: AstyanaxReader.java    From blueflood with Apache License 2.0
/**
 * Method that returns all metadata for a given locator as a map.
 *
 * @param locator  locator name
 * @return Map of metadata for that locator
 * @throws RuntimeException(com.netflix.astyanax.connectionpool.exceptions.ConnectionException)
 */
public Map<String, String> getMetadataValues(Locator locator) {
    Timer.Context ctx = Instrumentation.getReadTimerContext(CassandraModel.CF_METRICS_METADATA_NAME);
    try {
        final ColumnList<String> results = keyspace.prepareQuery(CassandraModel.CF_METRICS_METADATA)
                .getKey(locator)
                .execute().getResult();
        return new HashMap<String, String>(){{
            for (Column<String> result : results) {
                put(result.getName(), result.getValue(StringMetadataSerializer.get()));
            }
        }};
    } catch (NotFoundException ex) {
        Instrumentation.markNotFound(CassandraModel.CF_METRICS_METADATA_NAME);
        return null;
    } catch (ConnectionException e) {
        log.error("Error reading metadata value", e);
        Instrumentation.markReadError(e);
        throw new RuntimeException(e);
    } finally {
        ctx.stop();
    }
}
 
Example #7
Source File: AstyanaxStoreManager.java    From titan1withtp3.1 with Apache License 2.0
@Override
public Map<String, String> getCompressionOptions(String cf) throws BackendException {
    try {
        Keyspace k = keyspaceContext.getClient();

        KeyspaceDefinition kdef = k.describeKeyspace();

        if (null == kdef) {
            throw new PermanentBackendException("Keyspace " + k.getKeyspaceName() + " is undefined");
        }

        ColumnFamilyDefinition cfdef = kdef.getColumnFamily(cf);

        if (null == cfdef) {
            throw new PermanentBackendException("Column family " + cf + " is undefined");
        }

        return cfdef.getCompressionOptions();
    } catch (ConnectionException e) {
        throw new PermanentBackendException(e);
    }
}
 
Example #8
Source File: EdgeDeleteRepairImpl.java    From usergrid with Apache License 2.0
public Observable<MarkedEdge> repair( final ApplicationScope scope, final MarkedEdge edge, final UUID timestamp ) {

    //merge source and target then deal with the distinct values
    return Observable.just( edge ).filter( markedEdge -> markedEdge.isDeleted() )
        .doOnNext( markedEdge -> {

            //it's still in the same state as it was when we queued it. Remove it
            if ( logger.isDebugEnabled() ) {
                logger.debug( "Removing edge {} ", markedEdge );
            }

            //remove from the commit log

            //remove from storage
            try {
                storageSerialization.deleteEdge( scope, markedEdge, timestamp ).execute();
            }
            catch ( ConnectionException e ) {
                throw new RuntimeException( "Unable to connect to cassandra", e );
            }
        } );
}
 
Example #9
Source File: MigrationInfoSerializationImpl.java    From usergrid with Apache License 2.0
@Override
public int getStatusCode(final String pluginName) {

    final ScopedRowKey<String> rowKey = ScopedRowKey.fromKey( STATIC_ID, pluginName);

    try {
        return keyspace.prepareQuery( CF_MIGRATION_INFO ).getKey( rowKey ).getColumn( COLUMN_STATUS_CODE )
                       .execute().getResult().getIntegerValue();
    }
    //swallow, it doesn't exist
    catch ( NotFoundException nfe ) {
        return 0;
    }
    catch ( ConnectionException e ) {
        throw new DataMigrationException( "Unable to retrieve status", e );
    }
}
 
Example #10
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public void deleteRow(String key) throws PaasException {
    invariant();
    
    MutationBatch mb = keyspace.prepareMutationBatch();
    mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key)).delete();
    
    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'" , 
                              key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
 
Example #11
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public void deleteColumn(String key, String column) throws PaasException {
    LOG.info("Update row");
    invariant();
    
    MutationBatch mb = keyspace.prepareMutationBatch();
    ColumnListMutation<ByteBuffer> mbRow = mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key));
    mbRow.deleteColumn(serializers.columnAsByteBuffer(column));
    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'" , 
                              key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
 
Example #12
Source File: AstyanaxThriftClusterAdminResource.java    From staash with Apache License 2.0
@Override
public void updateKeyspace(@PathParam("keyspace") String keyspaceName, KeyspaceEntity keyspace) throws PaasException {
    try {
        if (keyspace.getOptions() == null) {
            return; // Nothing to do 
        }
        
        // Add them as existing values to the properties object
        Properties props = new Properties();
        props.putAll(cluster.getKeyspaceProperties(keyspaceName));
        props.putAll(keyspace.getOptions());
        props.setProperty("name", keyspace.getName());
        keyspace.setClusterName(clusterKey.getClusterName());
        
        cluster.updateKeyspace(props);
        eventBus.post(new KeyspaceUpdateEvent(new KeyspaceKey(clusterKey, keyspace.getName())));
    } catch (ConnectionException e) {
        throw new PaasException(String.format("Error creating keyspace '%s' from cluster '%s'", keyspace.getName(), clusterKey.getClusterName()), e);
    }
}
 
Example #13
Source File: InstanceDataDAOCassandra.java    From Raigad with Apache License 2.0
@Inject
public InstanceDataDAOCassandra(IConfiguration config, EurekaHostsSupplier eurekaHostsSupplier)
        throws ConnectionException {
    this.config = config;

    BOOT_CLUSTER = config.getBootClusterName();

    if (BOOT_CLUSTER == null || BOOT_CLUSTER.isEmpty()) {
        throw new RuntimeException("Boot cluster can not be blank. Please use getBootClusterName() property");
    }

    KS_NAME = config.getCassandraKeyspaceName();

    if (KS_NAME == null || KS_NAME.isEmpty()) {
        throw new RuntimeException("Cassandra keyspace can not be blank. Please use getCassandraKeyspaceName() property");
    }

    thriftPortForAstyanax = config.getCassandraThriftPortForAstyanax();

    if (thriftPortForAstyanax <= 0) {
        throw new RuntimeException("Thrift port for Astyanax can not be blank. Please use getCassandraThriftPortForAstyanax() property");
    }

    this.eurekaHostsSupplier = eurekaHostsSupplier;

    if (config.isEurekaHostSupplierEnabled()) {
        ctx = initWithThriftDriverWithEurekaHostsSupplier();
    }
    else {
        ctx = initWithThriftDriverWithExternalHostsSupplier();
    }

    ctx.start();
    bootKeyspace = ctx.getClient();
}
 
Example #14
Source File: KeyspaceUtil.java    From emodb with Apache License 2.0
/**
 * Returns a view of the provided Keyspace that pins all operations to the provided host.
 */
public Keyspace toHost(Host host) throws ConnectionException {
    if (!_keyspace.getConnectionPool().getPools().stream()
                    .map(HostConnectionPool::getHost)
                    .anyMatch(poolHost -> poolHost.equals(host))) {
        throw new NoAvailableHostsException("Host not found in pool");
    }

    return pinToVerifiedHost(host);
}
 
Example #15
Source File: UniqueValueEntryMock.java    From usergrid with Apache License 2.0
/**
 *
 * @param uniqueValueSerializationStrategy The mock to use
 * @param scope The scope to use
 * @param entityId The entityId to use
 * @param versions The version numbers to mock
 * @throws com.netflix.astyanax.connectionpool.exceptions.ConnectionException
 */
public static UniqueValueEntryMock createUniqueMock(
        final UniqueValueSerializationStrategy uniqueValueSerializationStrategy, final ApplicationScope scope,
        final Id entityId, final List<UUID> versions ) throws ConnectionException {

    UniqueValueEntryMock mock = new UniqueValueEntryMock( entityId, versions );
    mock.initMock( uniqueValueSerializationStrategy, scope );

    return mock;
}
 
Example #16
Source File: AstyanaxMetaDaoImpl.java    From staash with Apache License 2.0
public String writeMetaEntity(Entity entity) {
	try {
		String stmt = String.format(PaasUtils.INSERT_FORMAT, MetaConstants.META_KEY_SPACE
				+ "." + MetaConstants.META_COLUMN_FAMILY,
				entity.getRowKey(), entity.getName(), entity.getPayLoad());
		keyspace.prepareCqlStatement().withCql(stmt).execute();
		StaashRequestContext.addContext("Meta_Write", "write succeeded on meta: "+ entity!=null?entity.getPayLoad():null);
	} catch (ConnectionException e) {
		logger.info("Write of the entity failed "+entity!=null?entity.getPayLoad():null);
		StaashRequestContext.addContext("Meta_Write", "write failed on meta: "+ entity!=null?entity.getPayLoad():null);
		throw new RuntimeException(e.getMessage());
	}
	return "{\"msg\":\"ok\"}";
}
 
Example #17
Source File: AstyanaxDataWriterDAO.java    From emodb with Apache License 2.0
private boolean isThriftFramedTransportSizeOverrun(Execution<?> execution, ConnectionException exception) {
    // Thrift framed transport size overruns don't have an explicit exception, but they fall under the general
    // umbrella of "unknown" thrift transport exceptions.
    Optional<Throwable> thriftException =
            Iterables.tryFind(Throwables.getCausalChain(exception), Predicates.instanceOf(TTransportException.class));
    //noinspection ThrowableResultOfMethodCallIgnored
    if (!thriftException.isPresent() || ((TTransportException) thriftException.get()).getType() != TTransportException.UNKNOWN) {
        return false;
    }

    return execution instanceof MutationBatch &&
            getMutationBatchSize((MutationBatch) execution) >= MAX_THRIFT_FRAMED_TRANSPORT_SIZE;
}
 
Example #18
Source File: AstyanaxWriter.java    From blueflood with Apache License 2.0
public void insertRollups(List<SingleRollupWriteContext> writeContexts) throws ConnectionException {
    if (writeContexts.size() == 0) {
        return;
    }
    Timer.Context ctx = Instrumentation.getBatchWriteTimerContext(writeContexts.get(0).getDestinationCF().getName());
    MutationBatch mb = keyspace.prepareMutationBatch();
    for (SingleRollupWriteContext writeContext : writeContexts) {
        Rollup rollup = writeContext.getRollup();
        int ttl = (int)TTL_PROVIDER.getTTL(
                writeContext.getLocator().getTenantId(),
                writeContext.getGranularity(),
                writeContext.getRollup().getRollupType()).get().toSeconds();
        AbstractSerializer serializer = Serializers.serializerFor(rollup.getClass());
        try {
            mb.withRow(writeContext.getDestinationCF(), writeContext.getLocator())
                    .putColumn(writeContext.getTimestamp(),
                            rollup,
                            serializer,
                            ttl);
        } catch (RuntimeException ex) {
            // let's not let stupidness prevent the rest of this put.
            log.warn(String.format("Cannot save %s", writeContext.getLocator().toString()), ex);
        }
    }
    try {
        mb.execute();
    } catch (ConnectionException e) {
        Instrumentation.markWriteError(e);
        log.error("Error writing rollup batch", e);
        throw e;
    } finally {
        ctx.stop();
    }
}
 
Example #19
Source File: AstyanaxDao.java    From staash with Apache License 2.0
@Override
public void createTable()  throws PersistenceException {
    try {
        keyspace.createColumnFamily(columnFamily, null);
    } catch (ConnectionException e) {
        throw new PersistenceException("Failed to create column family : " + columnFamily.getName(), e);
    }
}
 
Example #20
Source File: AstyanaxDaoSchemaProvider.java    From staash with Apache License 2.0
@Override
public synchronized void createSchema() {
    final Properties props = ConfigurationConverter.getProperties(configuration.subset(String.format(CONFIG_PREFIX_FORMAT, schemaName.toLowerCase())));
    try {
        props.setProperty("name", props.getProperty("keyspace"));
        LOG.info("Creating schema: " + schemaName + " " + props);
        this.keyspace.createKeyspace(props);
    } catch (ConnectionException e) {
        LOG.error("Failed to create schema '{}' with properties '{}'", new Object[]{schemaName, props.toString(), e});
        throw new RuntimeException("Failed to create keyspace " + keyspace.getKeyspaceName(), e);
    }
}
 
Example #21
Source File: AstyanaxDaoSchemaProvider.java    From staash with Apache License 2.0
@Override
public synchronized void dropSchema() {
    try {
        this.keyspace.dropKeyspace();
    } catch (ConnectionException e) {
        throw new RuntimeException("Failed to drop keyspace " + keyspace.getKeyspaceName(), e);
    }
}
 
Example #22
Source File: BatchUpdate.java    From emodb with Apache License 2.0
private <R> Future<OperationResult<R>> executeAsync(Execution<R> execution) {
    try {
        return execution.executeAsync();
    } catch (ConnectionException e) {
        throw Throwables.propagate(e);
    }
}
 
Example #23
Source File: AstyanaxWriter.java    From blueflood with Apache License 2.0
public void writeMetadataValue(Locator locator, String metaKey, String metaValue) throws ConnectionException {
    Timer.Context ctx = Instrumentation.getWriteTimerContext(CassandraModel.CF_METRICS_METADATA_NAME);
    try {
        keyspace.prepareColumnMutation(CassandraModel.CF_METRICS_METADATA, locator, metaKey)
                .putValue(metaValue, StringMetadataSerializer.get(), null)
                .execute();
    } catch (ConnectionException e) {
        Instrumentation.markWriteError(e);
        log.error("Error writing Metadata Value", e);
        throw e;
    } finally {
        ctx.stop();
    }
}
 
Example #24
Source File: AstyanaxThriftDataTableResource.java    From staash with Apache License 2.0
@Override
public QueryResult listRows(String cursor, Integer rowLimit, Integer columnLimit) throws PaasException {
    try {
        invariant();
        
        // Execute the query
        Partitioner partitioner = keyspace.getPartitioner();
        Rows<ByteBuffer, ByteBuffer> result = keyspace
            .prepareQuery(columnFamily)
            .getKeyRange(null,  null, cursor != null ? cursor : partitioner.getMinToken(),  partitioner.getMaxToken(),  rowLimit)
            .execute()
            .getResult();
        
        // Convert raw data into a simple sparse tree
        SchemalessRows.Builder builder = SchemalessRows.builder();
        for (Row<ByteBuffer, ByteBuffer> row : result) { 
            Map<String, String> columns = Maps.newHashMap();
            for (Column<ByteBuffer> column : row.getColumns()) {
                columns.put(serializers.columnAsString(column.getRawName()), serializers.valueAsString(column.getRawName(), column.getByteBufferValue()));
            }
            builder.addRow(serializers.keyAsString(row.getKey()), columns);
        }
        
        QueryResult dr = new QueryResult();
        dr.setSrows(builder.build());
        
        if (!result.isEmpty()) {
            dr.setCursor(partitioner.getTokenForKey(Iterables.getLast(result).getKey()));
        }
        return dr;
    } catch (ConnectionException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    return null;
}
 
Example #25
Source File: LogEntryIterator.java    From usergrid with Apache License 2.0
@Override
public boolean hasNext() {
    if ( elementItr == null || !elementItr.hasNext() && nextStart != null ) {
        try {
            advance();
        }
        catch ( ConnectionException e ) {
            throw new RuntimeException( "Unable to query cassandra", e );
        }
    }

    return elementItr.hasNext();
}
 
Example #26
Source File: NodeDeleteListenerImpl.java    From usergrid with Apache License 2.0
/**
 * Removes this node from the graph.
 *
 * @param scope The scope of the application
 * @param node The node that was deleted
 * @param timestamp The timestamp of the event
 *
 * @return An observable that emits the marked edges that have been removed with this node both as the
 *         target and source
 */
public Observable<MarkedEdge> receive( final ApplicationScope scope, final Id node, final UUID timestamp ) {


    return Observable.just( node )

            //delete source and targets in parallel and merge them into a single observable
            .flatMap( id -> {

                final Optional<Long> maxVersion = nodeSerialization.getMaxVersion( scope, node );

                if (logger.isTraceEnabled()) {
                    logger.trace("Node with id {} has max version of {}", node, maxVersion.orNull());
                }
                if ( !maxVersion.isPresent() ) {
                    return Observable.empty();
                }

                // do all the edge deletes and then remove the marked node, return all edges just deleted
                return
                    doDeletes( node, scope, maxVersion.get(), timestamp ).doOnCompleted( () -> {
                        try {
                            nodeSerialization.delete( scope, node, maxVersion.get()).execute();
                        } catch ( ConnectionException e ) {
                            throw new RuntimeException( "Unable to connect to cassandra", e );
                        }
                    });
            });
}
 
Example #27
Source File: APreaggregatedMetricsRW.java    From blueflood with Apache License 2.0
/**
 * Inserts a collection of metrics to the correct column family based on
 * the specified granularity
 *
 * @param metrics
 * @param granularity
 * @throws IOException
 */
@Override
public void insertMetrics(Collection<IMetric> metrics, Granularity granularity) throws IOException {
    try {
        AstyanaxWriter.getInstance().insertMetrics(metrics, CassandraModel.getPreaggregatedColumnFamily(granularity), isRecordingDelayedMetrics, clock);
    } catch (ConnectionException ex) {
        throw new IOException(ex);
    }
}
 
Example #28
Source File: LogEntryIterator.java    From usergrid with Apache License 2.0
/**
 * Advance our iterator
 */
public void advance() throws ConnectionException {

    final int requestedSize;
    UUID start;

    if ( nextStart != null ) {
        requestedSize = pageSize + 1;
        start = nextStart;
    }
    else {
        requestedSize = pageSize;
        start = startVersion;
    }

    //loop through every entry that's < this one and remove it
    List<MvccLogEntry> results = logEntrySerializationStrategy.load( scope, entityId, start, requestedSize );

    //we always remove the first version if it's equal since it's returned
    if ( nextStart != null && results.size() > 0 && results.get( 0 ).getVersion().equals( nextStart ) ) {
        results.remove( 0 );
    }



    //we have results, set our next start.  If we miss our start version (due to deletion) and we request a +1, we want to ensure we set our next, hence the >=
    if ( results.size() >= pageSize ) {
        nextStart = results.get( results.size() - 1 ).getVersion();
    }
    //nothing left to do
    else {
        nextStart = null;
    }




    elementItr = results.iterator();
}
 
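Examples #25 and #28 come from the same class; together they show that a ConnectionException raised while paging is rethrown from hasNext() wrapped in a RuntimeException, so a consumer catches the unchecked wrapper rather than the checked exception. The loop below is a sketch: the handleEntry(...) callback is a placeholder, and it assumes the iterator is consumed through the standard Iterator<MvccLogEntry> interface its @Override annotations suggest.

// Hypothetical consumer of the iterator from Examples #25 and #28.
void consume(Iterator<MvccLogEntry> logEntries) {
    try {
        while (logEntries.hasNext()) {
            handleEntry(logEntries.next());  // handleEntry is a placeholder callback
        }
    } catch (RuntimeException e) {
        // hasNext() wraps ConnectionException (Example #25), so unwrap to detect it
        if (e.getCause() instanceof ConnectionException) {
            // react to the Cassandra connectivity failure (retry, back off, alert, ...)
        }
        throw e;
    }
}
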
Example #29
Source File: MvccEntitySerializationStrategyImplTest.java    From usergrid with Apache License 2.0
@Test(expected = NullPointerException.class)
public void loadListParamEntityId() throws ConnectionException {

    serializationStrategy
            .loadDescendingHistory(
                    new ApplicationScopeImpl( new SimpleId( "organization" ) ), null,
                    UUIDGenerator.newTimeUUID(), 1 );
}
 
Example #30
Source File: AstyanaxMetaDaoImpl.java    From staash with Apache License 2.0
public Map<String, JsonObject> runQuery(String key, String col) {
	OperationResult<CqlStatementResult> rs;
	Map<String, JsonObject> resultMap = new HashMap<String, JsonObject>();
	try {
		String queryStr = "";
		if (col != null && !col.equals("*")) {
			queryStr = "select column1, value from "+MetaConstants.META_KEY_SPACE + "." + MetaConstants.META_COLUMN_FAMILY +" where key='"
					+ key + "' and column1='" + col + "';";
		} else {
			queryStr = "select column1, value from "+MetaConstants.META_KEY_SPACE + "." + MetaConstants.META_COLUMN_FAMILY +" where key='"
					+ key + "';";
		}
		rs = keyspace.prepareCqlStatement().withCql(queryStr).execute();
		for (Row<String, String> row : rs.getResult().getRows(METACF)) {

			ColumnList<String> columns = row.getColumns();

			String key1 = columns.getStringValue("column1", null);
			String val1 = columns.getStringValue("value", null);
			resultMap.put(key1, new JsonObject(val1));
		}
	} catch (ConnectionException e) {
		e.printStackTrace();
		throw new RuntimeException(e.getMessage());
	}

	return resultMap;
}