Java Code Examples for me.prettyprint.hector.api.query.RangeSlicesQuery#execute()

The following examples show how to use me.prettyprint.hector.api.query.RangeSlicesQuery#execute(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0 6 votes vote down vote up
/**
 * Removes the facet by deleting every artifact-metadata row that matches the
 * given repository/namespace/project/version coordinates.
 *
 * @param session       the active repository session (unused here)
 * @param repositoryId  repository the artifact belongs to
 * @param namespace     artifact namespace
 * @param project       project id
 * @param version       artifact version
 * @param metadataFacet the facet to remove (rows are deleted wholesale, so the
 *                      facet instance itself is not consulted)
 * @throws MetadataRepositoryException on repository access failure
 */
@Override
public void removeFacetFromArtifact( RepositorySession session, final String repositoryId, final String namespace, final String project,
                                     final String version, final MetadataFacet metadataFacet )
    throws MetadataRepositoryException
{
    // Build a single indexed range-slices query over the artifact-metadata
    // column family, constrained to the exact artifact coordinates.
    RangeSlicesQuery<String, String, String> rangeQuery = HFactory //
        .createRangeSlicesQuery( keyspace, ss, ss, ss ) //
        .setColumnFamily( cassandraArchivaManager.getArtifactMetadataFamilyName() ) //
        .setColumnNames( NAMESPACE_ID.toString() ) //
        .addEqualsExpression( REPOSITORY_NAME.toString(), repositoryId ) //
        .addEqualsExpression( NAMESPACE_ID.toString(), namespace ) //
        .addEqualsExpression( PROJECT.toString(), project ) //
        .addEqualsExpression( VERSION.toString(), version );

    QueryResult<OrderedRows<String, String, String>> matches = rangeQuery.execute();

    // Delete each matching row by key.
    for ( Row<String, String, String> matched : matches.get() )
    {
        this.artifactMetadataTemplate.deleteRow( matched.getKey() );
    }
}
 
Example 2
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0 6 votes vote down vote up
/**
 * Streams all artifact metadata for a repository, applying the offset/limit
 * paging from the supplied query parameter.
 *
 * @param session        the active repository session (unused here)
 * @param repositoryId   repository whose artifacts are streamed
 * @param queryParameter carries offset and limit for paging
 * @return a lazily-evaluated stream of mapped {@code ArtifactMetadata}
 * @throws MetadataResolutionException if mapping the rows fails
 */
@Override
public Stream<ArtifactMetadata> getArtifactStream( final RepositorySession session, final String repositoryId,
                                                   final QueryParameter queryParameter ) throws MetadataResolutionException
{
    // All metadata columns for every row of the given repository.
    RangeSlicesQuery<String, String, String> repoQuery = HFactory //
        .createRangeSlicesQuery( keyspace, ss, ss, ss ) //
        .setColumnFamily( cassandraArchivaManager.getArtifactMetadataFamilyName( ) ) //
        .setColumnNames( ArtifactMetadataModel.COLUMNS ) //
        .addEqualsExpression( REPOSITORY_NAME.toString( ), repositoryId );

    QueryResult<OrderedRows<String, String, String>> rows = repoQuery.execute( );

    try
    {
        // Map each row to ArtifactMetadata via the shared spliterator helper,
        // then page the resulting sequential stream.
        Stream<ArtifactMetadata> mapped = StreamSupport.stream(
            createResultSpliterator( rows, ( Row<String, String, String> row, ArtifactMetadata last ) ->
                mapArtifactMetadataStringColumnSlice( row.getKey( ), row.getColumnSlice( ) ) ),
            false );
        return mapped.skip( queryParameter.getOffset( ) ).limit( queryParameter.getLimit( ) );
    }
    catch ( MetadataRepositoryException e )
    {
        // Re-wrap in the resolution exception type, preserving the cause.
        throw new MetadataResolutionException( e.getMessage( ), e );
    }
}
 
Example 3
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0 5 votes vote down vote up
/**
 * Returns artifacts gathered within the given (optional, inclusive) time
 * bounds, restricted to the given repository.
 *
 * @param session        the active repository session (unused here)
 * @param repositoryId   repository to filter on
 * @param startTime      lower bound on WHEN_GATHERED, or null for unbounded
 * @param endTime        upper bound on WHEN_GATHERED, or null for unbounded
 * @param queryParameter paging parameters (not applied by this implementation)
 * @return the matching artifact metadata, possibly empty
 * @throws MetadataRepositoryException on repository access failure
 */
@Override
public List<ArtifactMetadata> getArtifactsByDateRange( RepositorySession session, final String repositoryId, final ZonedDateTime startTime,
                                                       final ZonedDateTime endTime, QueryParameter queryParameter )
    throws MetadataRepositoryException
{
    LongSerializer ls = LongSerializer.get();
    RangeSlicesQuery<String, String, Long> query = HFactory //
        .createRangeSlicesQuery( keyspace, ss, ss, ls ) //
        .setColumnFamily( cassandraArchivaManager.getArtifactMetadataFamilyName() ) //
        .setColumnNames( ArtifactMetadataModel.COLUMNS ); //

    // Both bounds are optional; add only the ones the caller supplied.
    if ( startTime != null )
    {
        query = query.addGteExpression( WHEN_GATHERED.toString(), startTime.toInstant().toEpochMilli() );
    }
    if ( endTime != null )
    {
        query = query.addLteExpression( WHEN_GATHERED.toString(), endTime.toInstant().toEpochMilli() );
    }
    QueryResult<OrderedRows<String, String, Long>> result = query.execute();

    List<ArtifactMetadata> artifactMetadatas = new ArrayList<>( result.get().getCount() );
    // FIX: map each row with its OWN key. The previous code read the key of the
    // first row only and reused it for every row, attaching all metadata in the
    // result set to a single artifact.
    for ( Row<String, String, Long> row : result.get() )
    {
        ColumnSlice<String, Long> columnSlice = row.getColumnSlice();
        String repositoryName = getAsStringValue( columnSlice, REPOSITORY_NAME.toString() );
        if ( StringUtils.equals( repositoryName, repositoryId ) )
        {
            artifactMetadatas.add( mapArtifactMetadataLongColumnSlice( row.getKey(), columnSlice ) );
        }
    }

    return artifactMetadatas;
}
 
Example 4
Source File: HectorPolicyManagerImpl.java    From ck with Apache License 2.0 4 votes vote down vote up
/**
 * Fetches all policies as DAOs, reading only the short-name and description
 * columns. Rows without a usable column slice or without a short name
 * (tombstones) are silently skipped.
 *
 * @return the policies found, or an empty list when the query yields no rows
 */
@Override
public List<PolicyDAO> getAllPolicies() {
    List<NamedColumn<UUID, String, String>> wantedColumns = CollectionUtils.list(
            Schema.POLICIES.SHORT_NAME,
            Schema.POLICIES.DESCRIPTION);
    RangeSlicesQuery<UUID, String, String> policyQuery =
            Schema.POLICIES.createRangeSlicesQuery(_keyspace, wantedColumns);

    // TODO: may need paging of data once we have more than a few hundred.
    //       This may need some sort of indexing since we're using RandomPartitioner,
    //       in order to return them in a useful order.
    policyQuery.setRowCount(1000);
    // TODO: needed?
    // query.setKeys("fake_key_0", "fake_key_4");

    QueryResult<OrderedRows<UUID, String, String>> queryResult = policyQuery.execute();

    OrderedRows<UUID, String, String> fetched = queryResult.get();
    if (fetched == null) {
        return Collections.emptyList();
    }

    // Convert each surviving row into a PolicyDAO; SkippedElementException
    // drops tombstone/incomplete rows from the output.
    return Functional.filter(fetched.getList(),
            new Filter<Row<UUID, String, String>, PolicyDAO>() {
                @Override
                public PolicyDAO filter(Row<UUID, String, String> row) throws SkippedElementException {
                    ColumnSlice<String, String> slice = row.getColumnSlice();
                    if (slice == null) {
                        throw new SkippedElementException();
                    }

                    String shortName;
                    try {
                        shortName = getNonNullStringColumn(slice, Schema.POLICIES.SHORT_NAME.getName());
                    } catch (NoSuchColumnException e) {
                        // Tombstone row
                        throw new SkippedElementException();
                    }

                    String description = getStringColumnOrNull(slice, Schema.POLICIES.DESCRIPTION.getName());

                    // FIXME: can't get date from string result.
                    //        To fix this, we need variable-value-typed range slices queries.
                    return new PolicyDAOImpl(new HectorPolicyIDImpl(row.getKey()), shortName, description,
                            new Date());
                }
            });
}
 
Example 5
Source File: CassandraUserStoreManager.java    From carbon-identity with Apache License 2.0 4 votes vote down vote up
/**
 * Lists the users in the user store.
 *
 * @param filter       range-slice start filter for the user rows
 * @param maxItemLimit maximum users to return; 0 yields an empty array,
 *                     negative or over-limit values fall back to the
 *                     realm-configured maximum
 * @return the (possibly domain-qualified) user names found
 * @throws UserStoreException on user-store access failure
 */
@Override
protected String[] doListUsers(String filter, int maxItemLimit) throws UserStoreException {

    List<String> users = new ArrayList<String>();

    if (maxItemLimit == 0) {
        return new String[0];
    }

    int givenMax = UserCoreConstants.MAX_USER_ROLE_LIST;

    try {
        givenMax = Integer.parseInt(realmConfig
                .getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_USER_LIST));
    } catch (Exception e) {
        givenMax = UserCoreConstants.MAX_USER_ROLE_LIST;

        if (log.isDebugEnabled()) {
            log.debug("Realm configuration maximum not set : Using User Core Constant value instead!", e);
        }
    }

    if (maxItemLimit < 0 || maxItemLimit > givenMax) {
        maxItemLimit = givenMax;
    }

    RangeSlicesQuery<String, String, String> rangeSliceQuery = HFactory.createRangeSlicesQuery(keyspace,
            stringSerializer, stringSerializer, stringSerializer);

    rangeSliceQuery.setColumnFamily(CFConstants.UM_USER);
    rangeSliceQuery.setRange(filter, null, false, Integer.MAX_VALUE);
    rangeSliceQuery.addEqualsExpression(CFConstants.UM_TENANT_ID, tenantIdString);

    // TODO - Need to check how to use the filter for range
    rangeSliceQuery.setKeys("", "");
    rangeSliceQuery.setRowCount(maxItemLimit);
    QueryResult<OrderedRows<String, String, String>> result = rangeSliceQuery.execute();
    if (result != null) {
        OrderedRows<String, String, String> rows = result.get();
        if (rows.getCount() <= 0) {
            return users.toArray(new String[0]);
        }

        Iterator<Row<String, String, String>> rowsIterator = rows.iterator();

        while (rowsIterator.hasNext()) {
            Row<String, String, String> row = rowsIterator.next();
            // FIX: getColumnByName() returns null for tombstone/partial rows;
            // the previous code dereferenced it unconditionally and could NPE.
            if (row.getColumnSlice().getColumnByName(CFConstants.UM_USER_ID) != null
                    && row.getColumnSlice().getColumnByName(CFConstants.UM_USER_ID).getValue() != null
                    && row.getColumnSlice().getColumnByName(CFConstants.UM_USER_NAME) != null) {
                String name = row.getColumnSlice().getColumnByName(CFConstants.UM_USER_NAME).getValue();
                // append the domain if exist
                name = UserCoreUtil.addDomainToName(name, domain);
                users.add(name);
            }
        }

    }
    // FIX: size the result array from the collected list, not from the raw row
    // count — skipped rows previously produced trailing null entries.
    return users.toArray(new String[users.size()]);

}
 
Example 6
Source File: CassandraUserStoreManager.java    From carbon-identity with Apache License 2.0 4 votes vote down vote up
/**
 * Get the role names in the roles store.
 *
 * @param filter       unused by this implementation (full range is scanned)
 * @param maxItemLimit maximum roles to return; 0 yields an empty array,
 *                     negative or over-limit values fall back to the
 *                     realm-configured maximum
 * @return the (possibly domain-qualified) role names found
 * @throws UserStoreException on user-store access failure
 */
@Override
public String[] doGetRoleNames(String filter, int maxItemLimit) throws UserStoreException {
    List<String> roles = new ArrayList<String>();

    if (maxItemLimit == 0) {
        return new String[0];
    }

    int givenMax = UserCoreConstants.MAX_USER_ROLE_LIST;

    try {
        givenMax = Integer.parseInt(realmConfig
                .getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_ROLE_LIST));
    } catch (Exception e) {
        givenMax = UserCoreConstants.MAX_USER_ROLE_LIST;

        if (log.isDebugEnabled()) {
            log.debug("Realm configuration maximum not set : Using User Core Constant value instead!", e);
        }
    }

    if (maxItemLimit < 0 || maxItemLimit > givenMax) {
        maxItemLimit = givenMax;
    }

    String domain = realmConfig.getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_DOMAIN_NAME);
    RangeSlicesQuery<String, String, String> rangeSliceQuery = HFactory.createRangeSlicesQuery(keyspace,
            stringSerializer, stringSerializer, stringSerializer);
    rangeSliceQuery.setColumnFamily(CFConstants.UM_ROLES);
    rangeSliceQuery.setRange(null, null, false, Integer.MAX_VALUE);
    rangeSliceQuery.addEqualsExpression(CFConstants.UM_TENANT_ID, tenantIdString);
    rangeSliceQuery.setKeys("", "");
    rangeSliceQuery.setRowCount(maxItemLimit);
    QueryResult<OrderedRows<String, String, String>> result = rangeSliceQuery.execute();
    if (result != null) {
        OrderedRows<String, String, String> rows = result.get();
        if (rows.getCount() <= 0) {
            return roles.toArray(new String[0]);
        }

        Iterator<Row<String, String, String>> rowsIterator = rows.iterator();

        while (rowsIterator.hasNext()) {
            Row<String, String, String> row = rowsIterator.next();
            // FIX: getColumnByName() returns null for tombstone/partial rows;
            // the previous code dereferenced it unconditionally and could NPE.
            if (row.getColumnSlice().getColumnByName(CFConstants.UM_ROLE_NAME) != null
                    && row.getColumnSlice().getColumnByName(CFConstants.UM_ROLE_NAME).getValue() != null) {
                String name = row.getColumnSlice().getColumnByName(CFConstants.UM_ROLE_NAME).getValue();
                // append the domain if exist
                name = UserCoreUtil.addDomainToName(name, domain);
                roles.add(name);
            }
        }

    }
    // FIX: size the result array from the collected list, not from the raw row
    // count — skipped rows previously produced trailing null entries.
    return roles.toArray(new String[roles.size()]);
}
 
Example 7
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0 4 votes vote down vote up
/**
 * Finds artifacts whose metadata facet carries the given attribute value,
 * optionally constrained by facet key and repository.
 *
 * @param session      the active repository session (unused here)
 * @param key          facet key to match, or null for any key
 * @param value        facet value to match (required)
 * @param repositoryId repository to restrict to, or null for any repository
 * @return the matching artifact metadata (with facets mapped in), possibly empty
 * @throws MetadataRepositoryException on repository access failure
 */
@Override
public List<ArtifactMetadata> getArtifactsByAttribute( RepositorySession session, String key, String value, String repositoryId )
    throws MetadataRepositoryException
{
    RangeSlicesQuery<String, String, String> query =
        HFactory.createRangeSlicesQuery( keyspace, ss, ss, ss ) //
        .setColumnFamily( cassandraArchivaManager.getMetadataFacetFamilyName() ) //
        .setColumnNames( MetadataFacetModel.COLUMNS ) //
        .addEqualsExpression( VALUE.toString(), value );

    // key and repositoryId are optional filters.
    if ( key != null )
    {
        query.addEqualsExpression( KEY.toString(), key ); //
    }
    if ( repositoryId != null )
    {
        query.addEqualsExpression( "repositoryName", repositoryId );
    }

    QueryResult<OrderedRows<String, String, String>> metadataFacetResult = query.execute();
    if ( metadataFacetResult.get() == null || metadataFacetResult.get().getCount() < 1 )
    {
        return Collections.emptyList();
    }

    List<ArtifactMetadata> artifactMetadatas = new LinkedList<>( );

    // TODO doing multiple queries, there should be a way to get all the artifactMetadatas for any number of
    // projects
    for ( Row<String, String, String> row : metadataFacetResult.get() )
    {
        QueryResult<OrderedRows<String, String, String>> artifactMetadataResult =
            HFactory.createRangeSlicesQuery( keyspace, ss, ss, ss ) //
            .setColumnFamily( cassandraArchivaManager.getArtifactMetadataFamilyName() ) //
            .setColumnNames( ArtifactMetadataModel.COLUMNS ) //
            .setRowCount( Integer.MAX_VALUE ) //
            .addEqualsExpression( REPOSITORY_NAME.toString(),
                                  getStringValue( row.getColumnSlice(), REPOSITORY_NAME ) ) //
            .addEqualsExpression( NAMESPACE_ID.toString(), getStringValue( row.getColumnSlice(), NAMESPACE_ID ) ) //
            .addEqualsExpression( PROJECT.toString(), getStringValue( row.getColumnSlice(), PROJECT_ID ) ) //
            .addEqualsExpression( PROJECT_VERSION.toString(),
                                  getStringValue( row.getColumnSlice(), PROJECT_VERSION ) ) //
            .execute();

        // FIX: skip facet rows with no matching artifacts instead of returning an
        // empty list — the old code discarded all artifacts accumulated so far
        // whenever a single facet row had no artifact-metadata match.
        if ( artifactMetadataResult.get() == null || artifactMetadataResult.get().getCount() < 1 )
        {
            continue;
        }

        for ( Row<String, String, String> artifactMetadataRow : artifactMetadataResult.get() )
        {
            String artifactKey = artifactMetadataRow.getKey();
            artifactMetadatas.add( mapArtifactMetadataStringColumnSlice( artifactKey, artifactMetadataRow.getColumnSlice() ) );
        }
    }

    return mapArtifactFacetToArtifact( metadataFacetResult, artifactMetadatas );
}
 
Example 8
Source File: CassandraMetadataRepository.java    From archiva with Apache License 2.0 3 votes vote down vote up
/**
 * Returns all artifact metadata stored for the given repository.
 *
 * @param session      the active repository session (unused here)
 * @param repositoryId repository whose artifacts are listed
 * @return all mapped artifact metadata, possibly empty
 * @throws MetadataRepositoryException on repository access failure
 */
@Override
public List<ArtifactMetadata> getArtifacts( RepositorySession session, final String repositoryId )
    throws MetadataRepositoryException
{
    // One range-slices query over every artifact-metadata column,
    // constrained to the requested repository.
    RangeSlicesQuery<String, String, String> repoQuery = HFactory //
        .createRangeSlicesQuery( keyspace, ss, ss, ss ) //
        .setColumnFamily( cassandraArchivaManager.getArtifactMetadataFamilyName() ) //
        .setColumnNames( ArtifactMetadataModel.COLUMNS ) //
        .addEqualsExpression( REPOSITORY_NAME.toString(), repositoryId );

    QueryResult<OrderedRows<String, String, String>> rows = repoQuery.execute();

    // Pre-size the list from the row count, then map each row by its key.
    List<ArtifactMetadata> artifactMetadatas = new ArrayList<>( rows.get().getCount() );
    for ( Row<String, String, String> row : rows.get() )
    {
        artifactMetadatas.add( mapArtifactMetadataStringColumnSlice( row.getKey(), row.getColumnSlice() ) );
    }

    return artifactMetadatas;
}