Python sqlalchemy.sql.func.sum() Examples

The following are 30 code examples of sqlalchemy.sql.func.sum(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module sqlalchemy.sql.func, or try the search function.
Example #1
Source File: api.py    From manila with Apache License 2.0 6 votes vote down vote up
def share_replica_data_get_for_project(context, project_id, user_id=None,
                                       session=None, share_type_id=None):
    """Return (replica count, total replica gigabytes) for a project.

    Only share instances that are replicas (replica_state is not NULL)
    are counted.  When ``share_type_id`` is given the totals are limited
    to that share type; otherwise ``user_id`` (if given) limits them to
    that user's shares.
    """
    if not session:
        session = get_session()

    base = model_query(
        context, models.ShareInstance,
        func.count(models.ShareInstance.id),
        func.sum(models.Share.size),
        read_deleted="no",
        session=session)
    # Join instances to their parent shares and keep only replicas.
    replicas = (base
                .join(models.Share,
                      models.ShareInstance.share_id == models.Share.id)
                .filter(models.Share.project_id == project_id)
                .filter(models.ShareInstance.replica_state.isnot(None)))

    if share_type_id:
        replicas = replicas.filter(
            models.ShareInstance.share_type_id == share_type_id)
    elif user_id:
        replicas = replicas.filter(models.Share.user_id == user_id)

    row = replicas.first()
    # COUNT()/SUM() may come back None when no rows matched.
    return row[0] or 0, row[1] or 0
Example #2
Source File: api.py    From manila with Apache License 2.0 6 votes vote down vote up
def snapshot_data_get_for_project(context, project_id, user_id,
                                  share_type_id=None, session=None):
    """Return (snapshot count, total snapshot gigabytes) for a project.

    ``share_type_id``, when given, restricts the totals to snapshots of
    shares with that type; otherwise ``user_id`` restricts them to one
    user's snapshots.
    """
    base = model_query(context, models.ShareSnapshot,
                       func.count(models.ShareSnapshot.id),
                       func.sum(models.ShareSnapshot.size),
                       read_deleted="no",
                       session=session)
    query = base.filter_by(project_id=project_id)

    if share_type_id:
        # Share type lives on the share instance, so join through it.
        query = query.join(
            models.ShareInstance,
            models.ShareInstance.share_id == models.ShareSnapshot.share_id,
        ).filter_by(share_type_id=share_type_id)
    elif user_id:
        query = query.filter_by(user_id=user_id)

    row = query.first()
    # Aggregates are None when the project has no matching snapshots.
    return row[0] or 0, row[1] or 0
Example #3
Source File: test_cte.py    From sqlalchemy with MIT License 6 votes vote down vote up
def test_recursive_union_alias_two(self):
        """Compile sum over a recursive CTE aliased after union_all().

        Same recursive counting query as the no-alias variant, but the
        unioned CTE is wrapped in .alias("ta"), so the outer SELECT must
        reference the alias name while the WITH clause keeps the CTE's
        own name "t".
        """

        # I know, this is the PG VALUES keyword,
        # we're cheating here.  also yes we need the SELECT,
        # sorry PG.
        t = select([func.values(1).label("n")]).cte("t", recursive=True)
        t = t.union_all(select([t.c.n + 1]).where(t.c.n < 100)).alias("ta")
        s = select([func.sum(t.c.n)])
        self.assert_compile(
            s,
            "WITH RECURSIVE t(n) AS "
            "(SELECT values(:values_1) AS n "
            "UNION ALL SELECT t.n + :n_1 AS anon_1 "
            "FROM t "
            "WHERE t.n < :n_2) "
            "SELECT sum(ta.n) AS sum_1 FROM t AS ta",
        ) 
Example #4
Source File: api.py    From manila with Apache License 2.0 6 votes vote down vote up
def service_get_all_share_sorted(context):
    """Return share-topic services together with per-host share gigabytes.

    Builds a subquery summing Share.size (labelled 'share_gigabytes')
    over non-deleted shares joined to their instances, then delegates to
    _service_get_all_topic_subquery for the actual service lookup.
    """
    session = get_session()
    with session.begin():
        topic = CONF.share_topic
        label = 'share_gigabytes'
        # NOTE(review): the GROUP BY is on ShareInstance.host while the
        # query selects from Share — presumably one aggregate row per
        # host; confirm against _service_get_all_topic_subquery's usage.
        subq = (model_query(context, models.Share,
                            func.sum(models.Share.size).label(label),
                            session=session, read_deleted="no").
                join(models.ShareInstance,
                     models.ShareInstance.share_id == models.Share.id).
                group_by(models.ShareInstance.host).
                subquery())
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label) 
Example #5
Source File: monitor.py    From choochoo with GNU General Public License v2.0 5 votes vote down vote up
def _read_data(self, s, start, finish):
        """Return (monitor journal ids, summed step count) for [start, finish)."""
        steps = s.query(func.sum(StatisticJournalInteger.value)). \
            join(StatisticName). \
            filter(StatisticName.name == Names.STEPS,
                   StatisticName.owner == self.owner_in,
                   StatisticJournalInteger.time < finish,
                   StatisticJournalInteger.time >= start).scalar()
        # Journals overlapping the window are the input sources.
        journals = s.query(MonitorJournal.id). \
            filter(MonitorJournal.start < finish,
                   MonitorJournal.finish >= start).all()
        source_ids = [journal_id for (journal_id,) in journals]
        return source_ids, steps
Example #6
Source File: aggregate_job_stats.py    From zeus with Apache License 2.0 5 votes vote down vote up
def aggregate_stat_for_build(build: Build, name: str, func_=func.sum):
    """
    Aggregates a single stat for all jobs the given build.

    The two "unique" test stats are computed from TestCase directly;
    every other stat is aggregated (with ``func_``) over the ItemStat
    rows belonging to the build's jobs.
    """

    def _unique_hash_count(*criteria):
        # Count distinct test hashes across the build's jobs.
        return (
            db.session.query(func.count(TestCase.hash.distinct()))
            .join(Job, TestCase.job_id == Job.id)
            .filter(*criteria)
            .as_scalar()
        )

    if name == "tests.count_unique":
        value = _unique_hash_count(Job.build_id == build.id)
    elif name == "tests.failures_unique":
        value = _unique_hash_count(
            TestCase.result == Result.failed, Job.build_id == build.id
        )
    else:
        job_ids = db.session.query(Job.id).filter(Job.build_id == build.id)
        value = (
            db.session.query(func.coalesce(func_(ItemStat.value), 0))
            .filter(
                ItemStat.item_id.in_(job_ids),
                ItemStat.name == name,
            )
            .as_scalar()
        )

    # Upsert the aggregated value as a stat on the build itself.
    create_or_update(
        model=ItemStat,
        where={"item_id": build.id, "name": name},
        values={"value": value},
    )
Example #7
Source File: aggregate_job_stats.py    From zeus with Apache License 2.0 5 votes vote down vote up
def record_test_stats(job_id: UUID):
    """Recompute and upsert test count/failure/duration stats for one job."""
    count_all = (
        db.session.query(func.count(TestCase.id))
        .filter(TestCase.job_id == job_id)
        .as_scalar()
    )
    count_failed = (
        db.session.query(func.count(TestCase.id))
        .filter(TestCase.job_id == job_id, TestCase.result == Result.failed)
        .as_scalar()
    )
    total_duration = (
        db.session.query(func.coalesce(func.sum(TestCase.duration), 0))
        .filter(TestCase.job_id == job_id)
        .as_scalar()
    )
    # Upsert each stat; the subqueries execute as part of the upserts.
    for stat_name, stat_value in (
        ("tests.count", count_all),
        ("tests.failures", count_failed),
        ("tests.duration", total_duration),
    ):
        create_or_update(
            ItemStat,
            where={"item_id": job_id, "name": stat_name},
            values={"value": stat_value},
        )
    db.session.flush()
Example #8
Source File: aggregate_job_stats.py    From zeus with Apache License 2.0 5 votes vote down vote up
def record_coverage_stats(build_id: UUID):
    """
    Aggregates all FileCoverage stats for the given build.
    """
    # One grouped query yields all four line-count sums; the labels
    # deliberately match the ItemStat names used below.
    coverage_stats = (
        db.session.query(
            func.sum(FileCoverage.lines_covered).label("coverage.lines_covered"),
            func.sum(FileCoverage.lines_uncovered).label("coverage.lines_uncovered"),
            func.sum(FileCoverage.diff_lines_covered).label(
                "coverage.diff_lines_covered"
            ),
            func.sum(FileCoverage.diff_lines_uncovered).label(
                "coverage.diff_lines_uncovered"
            ),
        )
        .filter(FileCoverage.build_id == build_id)
        .group_by(FileCoverage.build_id)
        .first()
    )

    # TODO(dcramer): it'd be safer if we did this query within SQL
    stat_list = (
        "coverage.lines_covered",
        "coverage.lines_uncovered",
        "coverage.diff_lines_covered",
        "coverage.diff_lines_uncovered",
    )
    # coverage_stats is None when .first() found no FileCoverage rows;
    # getattr(None, ..., None) then yields None for every stat name.
    if not any(getattr(coverage_stats, n, None) is not None for n in stat_list):
        # No coverage data at all: remove any stale stats for the build.
        ItemStat.query.filter(
            ItemStat.item_id == build_id, ItemStat.name.in_(stat_list)
        ).delete(synchronize_session=False)
    else:
        for name in stat_list:
            create_or_update(
                model=ItemStat,
                where={"item_id": build_id, "name": name},
                values={"value": getattr(coverage_stats, name, 0) or 0},
            ) 
Example #9
Source File: aggregate_job_stats.py    From zeus with Apache License 2.0 5 votes vote down vote up
def record_bundle_stats(job_id: UUID):
    """Recompute and upsert the total bundle asset size for one job."""
    total_size = (
        db.session.query(func.coalesce(func.sum(BundleAsset.size), 0))
        .filter(BundleAsset.job_id == job_id)
        .as_scalar()
    )
    create_or_update(
        ItemStat,
        where={"item_id": job_id, "name": "bundle.total_asset_size"},
        values={"value": total_size},
    )
    db.session.flush()
Example #10
Source File: repository_tests.py    From zeus with Apache License 2.0 5 votes vote down vote up
def get(self, repo: Repository):
        """
        Return a historical view of testcase results for the given repository.

        Aggregates TestCaseRollup rows from the last 30 days per
        (hash, name), ordered by failure count descending, and returns a
        paginated response serialized with ``testcases_schema``.
        """

        # Labelled once so the same expression can be reused in ORDER BY.
        runs_failed = func.sum(TestCaseRollup.runs_failed).label("runs_failed")

        query = (
            db.session.query(
                TestCaseRollup.hash,
                TestCaseRollup.name,
                func.sum(TestCaseRollup.total_runs).label("total_runs"),
                runs_failed,
                (
                    func.sum(TestCaseRollup.total_duration)
                    / func.sum(TestCaseRollup.total_runs)
                ).label("avg_duration"),
            )
            .filter(
                # HACK(dcramer): we're working around the postgres 9.6 query planner refusing to use
                # our index here and doing a full sequence scan on testcase.. but only when the repository_id
                # is a fixed value
                TestCaseRollup.repository_id == repo.id,
                TestCaseRollup.date >= timezone.now() - timedelta(days=30),
            )
            .group_by(TestCaseRollup.hash, TestCaseRollup.name)
            .order_by(runs_failed.desc())
        )

        return self.paginate_with_schema(testcases_schema, query) 
Example #11
Source File: xlsx_export.py    From mma-dexter with Apache License 2.0 5 votes vote down vote up
def write_summed_table(self, ws, name, query, rownum=0):
        """
        For a query which returns three columns, [A, B, C],
        write a table that uses A as row labels, B values as column
        labels, and C as counts for each.

        The query must return rows ordered by the first column.

        Returns number of rows written, including headers and footers.
        """
        row_label = query.column_descriptions[0]['name']

        # calculate col labels dynamically
        col_labels = set()

        data = OrderedDict()
        for label, rows in groupby(query.all(), lambda r: r[0]):
            data[label or '(none)'] = row = defaultdict(int)

            for r in rows:
                col_label = r[1] or '(none)'
                col_labels.add(col_label)
                row[col_label] = r[2]
                row['total'] += r[2]

        # final column labels
        col_labels = sorted(list(col_labels)) + ['total']
        keys = [row_label] + col_labels

        # decompose rows into a list of values
        # NOTE: use dict.items() (not the Python-2-only iteritems()) so this
        # works on Python 3; insertion order is preserved by OrderedDict.
        data = [[label] + [r[col] for col in col_labels]
                for label, r in data.items()]

        ws.add_table(rownum, 0, rownum + len(data) + 1, len(keys) - 1, {
            'name': name,
            'total_row': True,
            'columns': [{'header': k, 'total_function': 'sum' if i > 0 else None} for i, k in enumerate(keys)],
            'data': data,
        })

        # number of rows plus header and footer
        return len(data) + 2
Example #12
Source File: test_cte.py    From sqlalchemy with MIT License 5 votes vote down vote up
def test_recursive_union_no_alias_two(self):
        """Compile sum over a recursive CTE without aliasing the union.

        pg's example::

            WITH RECURSIVE t(n) AS (
                VALUES (1)
              UNION ALL
                SELECT n+1 FROM t WHERE n < 100
            )
            SELECT sum(n) FROM t;

        """

        # I know, this is the PG VALUES keyword,
        # we're cheating here.  also yes we need the SELECT,
        # sorry PG.
        t = select([func.values(1).label("n")]).cte("t", recursive=True)
        # No alias: the outer SELECT refers to the CTE by its own name "t".
        t = t.union_all(select([t.c.n + 1]).where(t.c.n < 100))
        s = select([func.sum(t.c.n)])
        self.assert_compile(
            s,
            "WITH RECURSIVE t(n) AS "
            "(SELECT values(:values_1) AS n "
            "UNION ALL SELECT t.n + :n_1 AS anon_1 "
            "FROM t "
            "WHERE t.n < :n_2) "
            "SELECT sum(t.n) AS sum_1 FROM t",
        ) 
Example #13
Source File: preprocessed.py    From clgen with GNU General Public License v3.0 5 votes vote down vote up
def char_count(self) -> int:
    """Get the total number of characters in the pre-processed corpus.

    This excludes contentfiles which did not pre-process successfully.
    """
    with self.Session() as session:
      query = session.query(func.sum(PreprocessedContentFile.charcount))
      # `== True` is intentional: it compiles to a SQL comparison rather
      # than being a Python truthiness test.
      query = query.filter(
        PreprocessedContentFile.preprocessing_succeeded == True
      )
      return query.scalar()
Example #14
Source File: preprocessed.py    From clgen with GNU General Public License v3.0 5 votes vote down vote up
def line_count(self) -> int:
    """Get the total number of lines in the pre-processed corpus.

    This excludes contentfiles which did not pre-process successfully.
    """
    with self.Session() as session:
      query = session.query(func.sum(PreprocessedContentFile.linecount))
      # `== True` is intentional: it compiles to a SQL comparison rather
      # than being a Python truthiness test.
      query = query.filter(
        PreprocessedContentFile.preprocessing_succeeded == True
      )
      return query.scalar()
Example #15
Source File: preprocessed.py    From clgen with GNU General Public License v3.0 5 votes vote down vote up
def input_char_count(self) -> int:
    """Get the total number of characters in the input content files."""
    with self.Session() as session:
      total = session.query(
        func.sum(PreprocessedContentFile.input_charcount)
      )
      return total.scalar()
Example #16
Source File: preprocessed.py    From clgen with GNU General Public License v3.0 5 votes vote down vote up
def input_line_count(self) -> int:
    """Get the total number of lines in the input content files.

    (Docstring fixed: this sums input_linecount, not characters.)
    """
    with self.Session() as session:
      return session.query(
        func.sum(PreprocessedContentFile.input_linecount)
      ).scalar() 
Example #17
Source File: encoded.py    From clgen with GNU General Public License v3.0 5 votes vote down vote up
def token_count(self) -> int:
    """Return the total number of tokens in the encoded corpus.

    This excludes the EOF markers which are appended to each encoded text.
    """
    with self.Session() as session:
      total = session.query(func.sum(EncodedContentFile.tokencount))
      return total.scalar()
Example #18
Source File: pysql.py    From spider163 with MIT License 5 votes vote down vote up
def stat_music():
    """Return comment-count leaderboards from the music163 table.

    Builds two datasets:
      * ``author-comment-count`` -- top 30 authors by summed comment
        count over all of their songs, as [author, count] pairs.
      * ``music-comment-count`` -- top 30 individual songs by comment
        count.
    """
    data = {"author-comment-count": []}
    # Fixed: the ORDER BY expression was labelled 'count' twice
    # (.label('count').label('count')); one label is sufficient.
    cd = (settings.Session
          .query(Music163.author.label('author'),
                 func.sum(Music163.comment).label('count'))
          .group_by("author")
          .order_by(func.sum(Music163.comment).label('count').desc())
          .limit(30).all())
    for m in cd:
        data["author-comment-count"].append([m[0], int(m[1])])
    data["music-comment-count"] = (settings.Session
        .query(Music163.song_name, Music163.comment.label("count"))
        .order_by(Music163.comment.label("count").desc())
        .limit(30).all())
    return data
Example #19
Source File: api.py    From zun with Apache License 2.0 5 votes vote down vote up
def count_usage(self, context, container_type, project_id, flag):
        """Return a one-row aggregate of container resource usage.

        :param flag: 'containers' to count containers, or one of
            'disk'/'cpu'/'memory' to sum that Container column.
        :returns: the first row of the aggregate query.
        :raises ValueError: if ``flag`` is not a supported value
            (previously this fell through and raised UnboundLocalError).
        """
        session = get_session()
        if flag == 'containers':
            project_query = session.query(
                func.count(models.Container.id)). \
                filter_by(project_id=project_id). \
                filter_by(container_type=container_type)
        elif flag in ['disk', 'cpu', 'memory']:
            project_query = session.query(
                func.sum(getattr(models.Container, flag))). \
                filter_by(project_id=project_id). \
                filter_by(container_type=container_type)
        else:
            raise ValueError("Unsupported usage flag: %r" % (flag,))

        return project_query.first()
Example #20
Source File: xlsx_export.py    From mma-dexter with Apache License 2.0 5 votes vote down vote up
def child_context_worksheet(self, wb):
        """Write the 'child_context' worksheet: counts per context indicator.

        Each func.sum(<col> == '<flag>', type_=Integer) presumably counts
        rows where the column equals the flag (the comparison summing as
        0/1) -- confirm against the DocumentChildrenView definition.
        """
        from dexter.models.views import DocumentChildrenView

        rows = self.filter(
            db.session.query(
                func.sum(DocumentChildrenView.c.basic_context == 'basic-context', type_=Integer).label('basic_context'),
                func.sum(DocumentChildrenView.c.causes_mentioned == 'causes-mentioned', type_=Integer).label('causes_mentioned'),
                func.sum(DocumentChildrenView.c.consequences_mentioned == 'consequences-mentioned', type_=Integer).label('consequences_mentioned'),
                func.sum(DocumentChildrenView.c.solutions_offered == 'solutions-offered', type_=Integer).label('solutions_offered'),
                func.sum(DocumentChildrenView.c.relevant_policies == 'relevant-policies', type_=Integer).label('relevant_policies'),
                func.sum(DocumentChildrenView.c.self_help_offered == 'self-help-offered', type_=Integer).label('self_help_offered'),
            )
            .join(Document)).all()
        if not rows:
            return

        ws = wb.add_worksheet('child_context')

        # Single aggregate row -> one [indicator, count] pair per column.
        d = rows[0]._asdict()
        data = [[k, d[k]] for k in d.keys()]
        ws.add_table(0, 0, len(data), 1, {
            'name': 'ChildContext',
            'data': data,
            'columns': [
                {'header': ''},
                {'header': 'count'},
            ]
        }) 
Example #21
Source File: xlsx_export.py    From mma-dexter with Apache License 2.0 5 votes vote down vote up
def child_victimisation_worksheet(self, wb):
        """Write the 'child_secondary_victimisation' worksheet.

        Counts documents per secondary-victimisation indicator; each
        func.sum(<col> == '<flag>', type_=Integer) presumably sums the
        0/1 result of the comparison -- confirm against the view.
        """
        from dexter.models.views import DocumentChildrenView

        ws = wb.add_worksheet('child_secondary_victimisation')

        rows = self.filter(
            db.session.query(
                func.sum(DocumentChildrenView.c.secondary_victim_source == 'secondary-victim-source', type_=Integer).label('secondary_victim_source'),
                func.sum(DocumentChildrenView.c.secondary_victim_identified == 'secondary-victim-identified', type_=Integer).label('secondary_victim_identified'),
                func.sum(DocumentChildrenView.c.secondary_victim_victim_of_abuse == 'secondary-victim-abused', type_=Integer).label('secondary_victim_victim_of_abuse'),
                func.sum(DocumentChildrenView.c.secondary_victim_source_identified_abused == 'secondary-victim-source-identified-abused', type_=Integer).label('secondary_victim_source_identified_abused'),
            )
            .join(Document)).all()
        if not rows:
            return

        # Single aggregate row; rows sorted by key length for display.
        d = rows[0]._asdict()
        data = [[k, d[k]] for k in sorted(d.keys(), key=len)]
        ws.add_table(0, 0, len(data), 1, {
            'name': 'ChildSecondaryVictimisation',
            'data': data,
            'columns': [
                {'header': ''},
                {'header': 'count'},
            ]
        }) 
Example #22
Source File: api.py    From manila with Apache License 2.0 5 votes vote down vote up
def share_data_get_for_project(context, project_id, user_id,
                               share_type_id=None, session=None):
    """Return (share count, total share gigabytes) for a project.

    When ``share_type_id`` is given the totals are restricted to shares
    of that type; otherwise ``user_id`` restricts them to a single user.
    """
    base = model_query(context, models.Share,
                       func.count(models.Share.id),
                       func.sum(models.Share.size),
                       read_deleted="no",
                       session=session)
    query = base.filter_by(project_id=project_id)
    if share_type_id:
        query = query.join("instances").filter_by(share_type_id=share_type_id)
    elif user_id:
        query = query.filter_by(user_id=user_id)
    row = query.first()
    # Both aggregates are None when no shares matched.
    return (row[0] or 0, row[1] or 0)
Example #23
Source File: api.py    From magnum with Apache License 2.0 5 votes vote down vote up
def get_cluster_stats(self, context, project_id=None):
        """Return (cluster count, total node count), optionally per project.

        :param project_id: if given, both totals are restricted to this
            project.
        """
        query = model_query(models.Cluster)
        node_count_col = models.NodeGroup.node_count
        ncfunc = func.sum(node_count_col)

        if project_id:
            query = query.filter_by(project_id=project_id)
            nquery = query.session.query(ncfunc.label("nodes")).filter_by(
                project_id=project_id)
        else:
            nquery = query.session.query(ncfunc.label("nodes"))

        clusters = query.count()
        # Execute the aggregate query once instead of twice; SUM() is
        # NULL when there are no node groups.
        total_nodes = nquery.one()[0]
        nodes = int(total_nodes) if total_nodes else 0
        return clusters, nodes
Example #24
Source File: api.py    From magnum with Apache License 2.0 5 votes vote down vote up
def _add_clusters_filters(self, query, filters):
        """Apply the supported ``filters`` dict to a Cluster query.

        Simple column filters use filter_by; 'status' matches any of a
        list of statuses; 'node_count'/'master_count' match the summed
        node_count of the cluster's worker/master nodegroups.
        """
        if filters is None:
            filters = {}

        possible_filters = ["cluster_template_id", "name", "stack_id",
                            "api_address", "node_addresses", "project_id",
                            "user_id"]

        # Only keep recognised keys; unknown filter names are ignored.
        filter_names = set(filters).intersection(possible_filters)
        filter_dict = {filter_name: filters[filter_name]
                       for filter_name in filter_names}

        query = query.filter_by(**filter_dict)

        if 'status' in filters:
            query = query.filter(models.Cluster.status.in_(filters['status']))

        # Helper to filter based on node_count field from nodegroups
        def filter_node_count(query, node_count, is_master=False):
            # Sum node_count per cluster over the master (or non-master)
            # nodegroups, keep clusters whose sum equals node_count.
            nfunc = func.sum(models.NodeGroup.node_count)
            nquery = model_query(models.NodeGroup)
            if is_master:
                nquery = nquery.filter(models.NodeGroup.role == 'master')
            else:
                nquery = nquery.filter(models.NodeGroup.role != 'master')
            nquery = nquery.group_by(models.NodeGroup.cluster_id)
            nquery = nquery.having(nfunc == node_count)
            uuids = [ng.cluster_id for ng in nquery.all()]
            return query.filter(models.Cluster.uuid.in_(uuids))

        if 'node_count' in filters:
            query = filter_node_count(
                query, filters['node_count'], is_master=False)
        if 'master_count' in filters:
            query = filter_node_count(
                query, filters['master_count'], is_master=True)

        return query 
Example #25
Source File: transfer_account.py    From SempoBlockchain with GNU General Public License v3.0 5 votes vote down vote up
def total_received(self):
        """Sum of completed transfer amounts received by this account (0 if none)."""
        CreditTransfer = server.models.credit_transfer.CreditTransfer
        row = (
            db.session.query(func.sum(CreditTransfer.transfer_amount).label('total'))
            .execution_options(show_all=True)
            .filter(CreditTransfer.transfer_status == TransferStatusEnum.COMPLETE)
            .filter(CreditTransfer.recipient_transfer_account_id == self.id)
            .first()
        )
        # SUM() is None when there are no completed incoming transfers.
        return int(row.total or 0)
Example #26
Source File: transfer_account.py    From SempoBlockchain with GNU General Public License v3.0 5 votes vote down vote up
def total_sent(self):
        """Sum of completed transfer amounts sent by this account (0 if none)."""
        CreditTransfer = server.models.credit_transfer.CreditTransfer
        row = (
            db.session.query(func.sum(CreditTransfer.transfer_amount).label('total'))
            .execution_options(show_all=True)
            .filter(CreditTransfer.transfer_status == TransferStatusEnum.COMPLETE)
            .filter(CreditTransfer.sender_transfer_account_id == self.id)
            .first()
        )
        # SUM() is None when there are no completed outgoing transfers.
        return int(row.total or 0)
Example #27
Source File: account_limit.py    From rucio with Apache License 2.0 5 votes vote down vote up
def get_global_account_usage(account, rse_expression=None, session=None):
    """
    Read the account usage and connect it with the global account limits of the account.

    :param account:          The account to read.
    :param rse_expression:   The RSE expression (If none, get all).
    :param session:          Database session in use.

    :returns:                List of dicts {'rse_id', 'bytes_used', 'files_used', 'bytes_limit'}
    """
    result_list = []
    if not rse_expression:
        # All RSE Expressions
        limits = get_global_account_limits(account=account, session=session)
        all_rse_usages = {usage['rse_id']: (usage['bytes'], usage['files']) for usage in get_all_rse_usages_per_account(account=account, session=session)}
        for rse_expression, limit in limits.items():
            usage = 0
            files = 0
            # Sum bytes/files over every RSE the expression resolves to;
            # missing RSEs contribute 0 via the .get() defaults.
            for rse in limit['resolved_rse_ids']:
                usage += all_rse_usages.get(rse, [0])[0]
                files += all_rse_usages.get(rse, [0, 0])[1]
            result_list.append({'rse_expression': rse_expression,
                                'bytes': usage, 'files': files,
                                'bytes_limit': limit['limit'],
                                'bytes_remaining': limit['limit'] - usage})
    else:
        # One RSE Expression
        limit = get_global_account_limit(account=account, rse_expression=rse_expression, session=session)
        resolved_rses = [resolved_rse['id'] for resolved_rse in parse_expression(rse_expression, session=session)]
        # NOTE(review): .first() returns None when the account has no
        # usage rows for these RSEs, which would make usage[0] raise
        # TypeError below -- confirm callers guarantee usage exists.
        usage = session.query(func.sum(models.AccountUsage.bytes), func.sum(models.AccountUsage.files))\
                       .filter(models.AccountUsage.account == account, models.AccountUsage.rse_id.in_(resolved_rses))\
                       .group_by(models.AccountUsage.account).first()
        result_list.append({'rse_expression': rse_expression,
                            'bytes': usage[0], 'files': usage[1],
                            'bytes_limit': limit,
                            'bytes_remaining': limit - usage[0]})
    return result_list 
Example #28
Source File: rule.py    From rucio with Apache License 2.0 5 votes vote down vote up
def re_evaluate_did(scope, name, rule_evaluation_action, session=None):
    """
    Re-Evaluates a did.

    :param scope:                   The scope of the did to be re-evaluated.
    :param name:                    The name of the did to be re-evaluated.
    :param rule_evaluation_action:  The Rule evaluation action.
    :param session:                 The database session in use.
    :raises:                        DataIdentifierNotFound
    """

    try:
        did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == scope,
                                                          models.DataIdentifier.name == name).one()
    except NoResultFound:
        raise DataIdentifierNotFound()

    if rule_evaluation_action == DIDReEvaluation.ATTACH:
        __evaluate_did_attach(did, session=session)
    else:
        __evaluate_did_detach(did, session=session)

    # Update size and length of did (Oracle-only branch in the original;
    # the index hint below is Oracle-specific).
    if session.bind.dialect.name == 'oracle':
        stmt = session.query(func.sum(models.DataIdentifierAssociation.bytes),
                             func.count(1)).\
            with_hint(models.DataIdentifierAssociation,
                      "index(CONTENTS CONTENTS_PK)", 'oracle').\
            filter(models.DataIdentifierAssociation.scope == scope,
                   models.DataIdentifierAssociation.name == name)
        # Renamed loop variables so 'bytes' no longer shadows the
        # built-in bytes type.
        for total_bytes, total_length in stmt:
            did.bytes = total_bytes
            did.length = total_length

    # Add an updated_col_rep
    if did.did_type == DIDType.DATASET:
        models.UpdatedCollectionReplica(scope=scope,
                                        name=name,
                                        did_type=did.did_type).save(session=session)
Example #29
Source File: inventory_entry.py    From cog with GNU Affero General Public License v3.0 5 votes vote down vote up
def quantity(self):
        """Returns quantity of items that have not been 'claimed' by a request.

        Unclaimed = items with no user assigned, minus the quantities
        reserved by APPROVED requests against this entry.
        """
        Request = hardwarecheckout.models.request.Request
        RequestStatus = hardwarecheckout.models.request.RequestStatus
        claimed = RequestItem.query \
                    .filter_by(entry_id=self.id) \
                    .join(Request) \
                    .filter_by(status=RequestStatus.APPROVED) \
                    .with_entities(func.sum(RequestItem.quantity)).scalar() or 0
        available = Item.query.filter_by(entry_id=self.id, user=None).count()
        return available - claimed
Example #30
Source File: test_cte.py    From sqlalchemy with MIT License 4 votes vote down vote up
def test_recursive_inner_cte_unioned_to_alias(self):
        """Compile the classic PG parts/sub-parts recursive CTE where the
        recursive term unions against an explicit alias ("incl") of the
        CTE; the alias name must be used throughout the compiled SQL.
        """
        parts = table(
            "parts", column("part"), column("sub_part"), column("quantity")
        )

        # Anchor term: direct sub-parts of "our part".
        included_parts = (
            select([parts.c.sub_part, parts.c.part, parts.c.quantity])
            .where(parts.c.part == "our part")
            .cte(recursive=True)
        )

        # Recursive term joins a fresh alias of parts to the CTE alias.
        incl_alias = included_parts.alias("incl")
        parts_alias = parts.alias()
        included_parts = incl_alias.union(
            select(
                [
                    parts_alias.c.sub_part,
                    parts_alias.c.part,
                    parts_alias.c.quantity,
                ]
            ).where(parts_alias.c.part == incl_alias.c.sub_part)
        )

        s = (
            select(
                [
                    included_parts.c.sub_part,
                    func.sum(included_parts.c.quantity).label(
                        "total_quantity"
                    ),
                ]
            )
            .select_from(
                included_parts.join(
                    parts, included_parts.c.part == parts.c.part
                )
            )
            .group_by(included_parts.c.sub_part)
        )
        self.assert_compile(
            s,
            "WITH RECURSIVE incl(sub_part, part, quantity) "
            "AS (SELECT parts.sub_part AS sub_part, parts.part "
            "AS part, parts.quantity AS quantity FROM parts "
            "WHERE parts.part = :part_1 UNION "
            "SELECT parts_1.sub_part AS sub_part, "
            "parts_1.part AS part, parts_1.quantity "
            "AS quantity FROM parts AS parts_1, incl "
            "WHERE parts_1.part = incl.sub_part) "
            "SELECT incl.sub_part, "
            "sum(incl.quantity) AS total_quantity FROM incl "
            "JOIN parts ON incl.part = parts.part "
            "GROUP BY incl.sub_part",
        )