Java Code Examples for com.mongodb.client.model.Projections#include()

The following examples show how to use com.mongodb.client.model.Projections#include(). You can go to the original project or source file by following the links above each example.
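Projections.include() takes one or more field names and builds the BSON document that find() and aggregate() accept as a projection. Before the project examples, here is a minimal sketch; the users collection and the name and email fields are hypothetical:

import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Projections;
import org.bson.Document;
import org.bson.conversions.Bson;

// include("name", "email") builds the projection {"name": 1, "email": 1};
// MongoDB then returns only those fields, plus _id by default.
void printNamesAndEmails(MongoCollection<Document> users) {
    Bson nameAndEmail = Projections.include("name", "email");
    for (Document doc : users.find().projection(nameAndEmail)) {
        System.out.println(doc.toJson());
    }
}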
Example 1
Source File: MongoThingsSearchPersistence.java    From ditto with Eclipse Public License 2.0
@Override
public Source<Metadata, NotUsed> sudoStreamMetadata(final EntityId lowerBound) {
    // Exclude soft-deleted things; when a real lower bound is given, resume
    // the stream strictly after that ID.
    final Bson notDeletedFilter = Filters.exists(FIELD_DELETE_AT, false);
    final Bson filter = lowerBound.isDummy()
            ? notDeletedFilter
            : Filters.and(notDeletedFilter, Filters.gt(FIELD_ID, lowerBound.toString()));
    // Project only the fields needed to build the Metadata objects.
    final Bson relevantFieldsProjection =
            Projections.include(FIELD_ID, FIELD_REVISION, FIELD_POLICY_ID, FIELD_POLICY_REVISION,
                    FIELD_PATH_MODIFIED);
    final Bson sortById = Sorts.ascending(FIELD_ID);
    final Publisher<Document> publisher = collection.find(filter)
            .projection(relevantFieldsProjection)
            .sort(sortById);
    return Source.fromPublisher(publisher).map(MongoThingsSearchPersistence::readAsMetadata);
}
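Note that include() never suppresses _id: MongoDB returns _id unless it is excluded explicitly. A minimal sketch combining include() with excludeId() through Projections.fields() (the field name here is hypothetical):

import com.mongodb.client.model.Projections;
import org.bson.conversions.Bson;

// fields() merges several projections; the result here is {"_id": 0, "name": 1},
// so only the name field comes back.
Bson nameOnly = Projections.fields(Projections.excludeId(), Projections.include("name"));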
 
Example 2
Source File: DifferConversationStore.java    From EDDI with Apache License 2.0
List<String> getAllDifferConversationIds() {
    List<String> ret = new LinkedList<>();

    // The collection is typed to a POJO, so iteration yields objects that
    // expose getConversationId() rather than raw Documents.
    var includeConversationIdField = Projections.include(CONVERSATION_ID_FIELD);
    var documents = collection.find().projection(includeConversationIdField);
    for (var conversationInfo : documents) {
        ret.add(conversationInfo.getConversationId());
    }

    return ret;
}
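This example relies on a collection typed to a POJO (DifferConversationInfo in EDDI), which is why each result exposes getConversationId(). A hypothetical equivalent against an untyped Document collection reads the projected field straight from the raw document:

import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Projections;
import org.bson.Document;
import java.util.LinkedList;
import java.util.List;

// Hypothetical variant: the same projection against a MongoCollection<Document>.
List<String> getAllConversationIds(MongoCollection<Document> collection, String conversationIdField) {
    List<String> ret = new LinkedList<>();
    for (Document doc : collection.find().projection(Projections.include(conversationIdField))) {
        ret.add(doc.getString(conversationIdField));
    }
    return ret;
}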
 
Example 3
Source File: AggregationPipelineQueryNode.java    From rya with Apache License 2.0
/**
 * Given that the current state of the pipeline produces data that can be
 * interpreted as triples, add a project step to map each result from the
 * intermediate result structure to a structure that can be stored in the
 * triple store. Does not modify the internal pipeline, which will still
 * produce intermediate results suitable for query evaluation.
 * @param timestamp Attach this timestamp to the resulting triples.
 * @param requireNew If true, add an additional step to check constructed
 *  triples against existing triples and only include new ones in the
 *  result. Adds a potentially expensive $lookup step.
 * @throws IllegalStateException if the results produced by the current
 *  pipeline do not have variable names allowing them to be interpreted as
 *  triples (i.e. "subject", "predicate", and "object").
 */
public List<Bson> getTriplePipeline(final long timestamp, final boolean requireNew) {
    if (!assuredBindingNames.contains(SUBJECT)
            || !assuredBindingNames.contains(PREDICATE)
            || !assuredBindingNames.contains(OBJECT)) {
        throw new IllegalStateException("Current pipeline does not produce "
                + "records that can be converted into triples.\n"
                + "Required variable names: <" + SUBJECT + ", " + PREDICATE
                + ", " + OBJECT + ">\nCurrent variable names: "
                + assuredBindingNames);
    }
    final List<Bson> triplePipeline = new LinkedList<>(pipeline);
    final List<Bson> fields = new LinkedList<>();
    fields.add(Projections.computed(SUBJECT, valueFieldExpr(SUBJECT)));
    fields.add(Projections.computed(SUBJECT_HASH, hashFieldExpr(SUBJECT)));
    fields.add(Projections.computed(PREDICATE, valueFieldExpr(PREDICATE)));
    fields.add(Projections.computed(PREDICATE_HASH, hashFieldExpr(PREDICATE)));
    fields.add(Projections.computed(OBJECT, valueFieldExpr(OBJECT)));
    fields.add(Projections.computed(OBJECT_HASH, hashFieldExpr(OBJECT)));
    fields.add(Projections.computed(OBJECT_TYPE,
            ConditionalOperators.ifNull(typeFieldExpr(OBJECT), DEFAULT_TYPE)));
    fields.add(Projections.computed(CONTEXT, DEFAULT_CONTEXT));
    fields.add(Projections.computed(STATEMENT_METADATA, DEFAULT_METADATA));
    fields.add(DEFAULT_DV);
    fields.add(Projections.computed(TIMESTAMP, new Document("$literal", timestamp)));
    fields.add(Projections.computed(LEVEL, new Document("$add", Arrays.asList("$" + LEVEL, 1))));
    triplePipeline.add(Aggregates.project(Projections.fields(fields)));
    if (requireNew) {
        // Prune any triples that already exist in the data store
        final String collectionName = collection.getNamespace().getCollectionName();
        final Bson includeAll = Projections.include(SUBJECT, SUBJECT_HASH,
                PREDICATE, PREDICATE_HASH, OBJECT, OBJECT_HASH,
                OBJECT_TYPE, CONTEXT, STATEMENT_METADATA,
                DOCUMENT_VISIBILITY, TIMESTAMP, LEVEL);
        // The $lookup below joins on the subject hash alone, so filter the joined
        // documents down to those whose predicate and object hashes also match.
        final List<Bson> eqTests = new LinkedList<>();
        eqTests.add(new Document("$eq", Arrays.asList("$$this." + PREDICATE_HASH, "$" + PREDICATE_HASH)));
        eqTests.add(new Document("$eq", Arrays.asList("$$this." + OBJECT_HASH, "$" + OBJECT_HASH)));
        final Bson redundantFilter = new Document("$filter", new Document("input", "$" + JOINED_TRIPLE)
                .append("as", "this").append("cond", new Document("$and", eqTests)));
        triplePipeline.add(Aggregates.lookup(collectionName, SUBJECT_HASH,
                SUBJECT_HASH, JOINED_TRIPLE));
        final String numRedundant = "REDUNDANT";
        // Count the exact matches and keep only triples with zero existing copies,
        // then drop the helper count field from the final output.
        triplePipeline.add(Aggregates.project(Projections.fields(includeAll,
                Projections.computed(numRedundant, new Document("$size", redundantFilter)))));
        triplePipeline.add(Aggregates.match(Filters.eq(numRedundant, 0)));
        triplePipeline.add(Aggregates.project(Projections.fields(includeAll)));
    }
    return triplePipeline;
}
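A hypothetical sketch of running the returned pipeline; queryNode, collection, and the result handling are assumptions for illustration, not part of the rya source:

import com.mongodb.client.MongoCollection;
import org.bson.Document;
import org.bson.conversions.Bson;
import java.util.List;

// Build the triple-producing pipeline (requireNew = true adds the $lookup-based
// duplicate check described above) and run it as an aggregation.
List<Bson> pipeline = queryNode.getTriplePipeline(System.currentTimeMillis(), true);
for (Document triple : collection.aggregate(pipeline)) {
    System.out.println(triple.toJson());   // each result is a storable triple document
}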