Python django.db.models.Min() Examples

The following are 30 code examples of django.db.models.Min(). Each example names the original project and source file it was taken from. You may also want to check out all available functions and classes of the module django.db.models, or try the search function.
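
Before diving into the project examples, here is a minimal sketch of the two common ways Min() is used. The Book and Author models and the result values below are hypothetical, for illustration only: aggregate() collapses the whole queryset into a single dict, while annotate() attaches a per-row minimum computed over a relation.

from django.db.models import Min

# Hypothetical models: Author, and Book with an integer pub_year field
# and a ForeignKey to Author.

# With an explicit alias, the result dict is keyed by that alias.
Book.objects.aggregate(first_year=Min('pub_year'))   # {'first_year': 1869}

# Without an alias, the key defaults to '<field>__min'.
Book.objects.aggregate(Min('pub_year'))              # {'pub_year__min': 1869}

# annotate() computes the minimum per row instead: every Author in the
# queryset gains a .first_year attribute holding their earliest book year.
Author.objects.annotate(first_year=Min('book__pub_year'))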
Example #1
Source File: v1.py    From lexpredict-contraxsuite with GNU Affero General Public License v3.0
def get(self, request, *args, **kwargs):
        qs = self.get_queryset()
        data = list(qs)

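        # aggregate() evaluates immediately and returns a dict keyed by the alias ('m' here).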
        max_value = qs.aggregate(m=Max('count'))['m']

        min_date = qs.aggregate(m=Min('date'))['m']
        max_date = qs.aggregate(m=Max('date'))['m']

        for item in data:
            item['weight'] = item['count'] / max_value
            # TODO: update url
            item['url'] = '{}?date_search={}'.format(
                reverse('extract:date-usage-list'), item['date'].isoformat())

        ret = {'data': data,
               'min_year': min_date.year,
               'max_year': max_date.year,
               'context': self.get_context()}

        return JsonResponse(ret) 
Example #2
Source File: generate.py    From swarfarm with Apache License 2.0
def get_item_report(qs, total_log_count, **kwargs):
    if qs.count() == 0:
        return None

    min_count = kwargs.get('min_count', max(1, int(MINIMUM_THRESHOLD * total_log_count)))

    results = list(
        qs.values(
            'item',
            name=F('item__name'),
            icon=F('item__icon'),
        ).annotate(
            count=Count('pk'),
            min=Min('quantity'),
            max=Max('quantity'),
            avg=Avg('quantity'),
            drop_chance=Cast(Count('pk'), FloatField()) / total_log_count * 100,
            qty_per_100=Cast(Sum('quantity'), FloatField()) / total_log_count * 100,
        ).filter(count__gt=min_count).order_by('-count')
    )

    return results 
Example #3
Source File: test_ocp_report_db_accessor.py    From koku with GNU Affero General Public License v3.0
def test_upsert_monthly_cluster_cost_line_item_no_report_period(self):
        """Test that the cluster monthly costs are not updated when no report period  is found."""
        report_table_name = OCP_REPORT_TABLE_MAP["report"]
        report_table = getattr(self.accessor.report_schema, report_table_name)
        rate = 1000

        with schema_context(self.schema):
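            # With no alias, aggregate keys default to '<field>__<func>', e.g. 'interval_start__min'.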
            report_entry = report_table.objects.all().aggregate(Min("interval_start"), Max("interval_start"))
            start_date = report_entry["interval_start__min"]
            end_date = report_entry["interval_start__max"]

            start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
            end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)
            self.accessor.upsert_monthly_cluster_cost_line_item(
                start_date, end_date, self.cluster_id, "cluster_alias", metric_constants.SUPPLEMENTARY_COST_TYPE, rate
            )
            summary_table_name = OCP_REPORT_TABLE_MAP["line_item_daily_summary"]
            query = self.accessor._get_db_obj_query(summary_table_name)
            self.assertFalse(query.filter(cluster_id=self.cluster_id).exists()) 
Example #4
Source File: test_ocp_report_db_accessor.py    From koku with GNU Affero General Public License v3.0
def test_upsert_monthly_cluster_cost_line_item(self):
        """Test that the cluster monthly costs are not updated."""
        report_table_name = OCP_REPORT_TABLE_MAP["report"]
        report_table = getattr(self.accessor.report_schema, report_table_name)
        rate = 1000
        with schema_context(self.schema):
            report_entry = report_table.objects.all().aggregate(Min("interval_start"), Max("interval_start"))
            start_date = report_entry["interval_start__min"]
            end_date = report_entry["interval_start__max"]

            start_date = str(self.reporting_period.report_period_start)
            end_date = str(self.reporting_period.report_period_end)
            self.accessor.upsert_monthly_cluster_cost_line_item(
                start_date, end_date, self.cluster_id, "cluster_alias", metric_constants.SUPPLEMENTARY_COST_TYPE, rate
            )
            summary_table_name = OCP_REPORT_TABLE_MAP["line_item_daily_summary"]
            query = self.accessor._get_db_obj_query(summary_table_name)
            self.assertEqual(query.filter(cluster_id=self.cluster_id).first().supplementary_monthly_cost, rate) 
Example #5
Source File: test_ocp_report_db_accessor.py    From koku with GNU Affero General Public License v3.0
def _populate_storage_summary(self, cluster_id=None):
        """Generate storage summary data."""
        report_table_name = OCP_REPORT_TABLE_MAP["report"]
        report_table = getattr(self.accessor.report_schema, report_table_name)
        if cluster_id is None:
            cluster_id = self.cluster_id
        for _ in range(25):
            pod = "".join(random.choice(string.ascii_lowercase) for _ in range(10))
            namespace = "".join(random.choice(string.ascii_lowercase) for _ in range(10))
            self.creator.create_ocp_usage_line_item(self.reporting_period, self.report, pod=pod, namespace=namespace)
            self.creator.create_ocp_storage_line_item(self.reporting_period, self.report, pod=pod, namespace=namespace)
        self.creator.create_ocp_node_label_line_item(self.reporting_period, self.report)
        with schema_context(self.schema):
            report_entry = report_table.objects.all().aggregate(Min("interval_start"), Max("interval_start"))
            start_date = report_entry["interval_start__min"]
            end_date = report_entry["interval_start__max"]

        start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
        end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)

        self.accessor.populate_node_label_line_item_daily_table(start_date, end_date, cluster_id)
        self.accessor.populate_line_item_daily_table(start_date, end_date, cluster_id)

        self.accessor.populate_storage_line_item_daily_table(start_date, end_date, cluster_id)
        self.accessor.populate_storage_line_item_daily_summary_table(start_date, end_date, cluster_id) 
Example #6
Source File: test_ocp_report_db_accessor.py    From koku with GNU Affero General Public License v3.0
def _populate_pod_summary(self):
        """Generate pod summary data."""
        report_table_name = OCP_REPORT_TABLE_MAP["report"]
        report_table = getattr(self.accessor.report_schema, report_table_name)

        cluster_id = "testcluster"
        for _ in range(25):
            self.creator.create_ocp_usage_line_item(self.reporting_period, self.report)

        self.creator.create_ocp_node_label_line_item(self.reporting_period, self.report)
        with schema_context(self.schema):
            report_entry = report_table.objects.all().aggregate(Min("interval_start"), Max("interval_start"))
            start_date = report_entry["interval_start__min"]
            end_date = report_entry["interval_start__max"]

        start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
        end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)

        self.accessor.populate_node_label_line_item_daily_table(start_date, end_date, cluster_id)
        self.accessor.populate_line_item_daily_table(start_date, end_date, cluster_id)
        self.accessor.populate_line_item_daily_summary_table(start_date, end_date, cluster_id)
        return (start_date, end_date) 
Example #7
Source File: postgres_to_influx.py    From chain-api with MIT License
def migrate_data(offset, limit=float('inf')):
    '''Migrates objects in batches of BATCH_SIZE, stopping once `limit` objects have been moved'''
    #queryset = ScalarData.objects.filter(
    #    timestamp__lt=FIRST_TIMESTAMP.isoformat())
    #print('Calculating min and max IDs...')
    #min_max = queryset.aggregate(min=models.Min('id'), max=models.Max('id'))
    #min_id = min_max['min']
    #max_id = min_max['max']
    min_id = 0
    max_id = 1068348868
    print('Got min ID {0} and max ID {1}'.format(min_id, max_id))
    moved = 0
    for i in range(min_id, max_id+1, BATCH_SIZE):
        print('Start moving objects[{0}:{1}]...'.format(
              i, i+BATCH_SIZE), end='')
        stdout.flush()
        moved_count = post_points(ScalarData.objects.filter(id__range=(i, i+BATCH_SIZE-1)))
        print('Moved {0} objects'.format(moved_count))
        stdout.flush()
        moved += moved_count
        if moved >= limit:
            break 
Example #8
Source File: test_azure_report_db_accessor.py    From koku with GNU Affero General Public License v3.0
def test_populate_markup_cost_no_billsids(self):
        """Test that the daily summary table is populated."""
        summary_table_name = AZURE_REPORT_TABLE_MAP["line_item_daily_summary"]
        summary_table = getattr(self.accessor.report_schema, summary_table_name)

        query = self.accessor._get_db_obj_query(summary_table_name)
        with schema_context(self.schema):
            expected_markup = query.aggregate(markup=Sum(F("pretax_cost") * decimal.Decimal(0.1)))
            expected_markup = expected_markup.get("markup")
            summary_entry = summary_table.objects.all().aggregate(Min("usage_start"), Max("usage_start"))
            start_date = summary_entry["usage_start__min"]
            end_date = summary_entry["usage_start__max"]

        self.accessor.populate_markup_cost(0.1, start_date, end_date, None)
        with schema_context(self.schema):
            query = self.accessor._get_db_obj_query(summary_table_name).aggregate(Sum("markup_cost"))
            actual_markup = query.get("markup_cost__sum")
            self.assertAlmostEqual(actual_markup, expected_markup, 6) 
Example #9
Source File: tests.py    From django-sqlserver with MIT License
def test_query_annotation(self):
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt) 
Example #10
Source File: test_aws_report_db_accessor.py    From koku with GNU Affero General Public License v3.0
def test_populate_markup_cost_no_billsids(self):
        """Test that the daily summary table is populated."""
        summary_table_name = AWS_CUR_TABLE_MAP["line_item_daily_summary"]
        summary_table = getattr(self.accessor.report_schema, summary_table_name)

        query = self.accessor._get_db_obj_query(summary_table_name)
        with schema_context(self.schema):
            expected_markup = query.aggregate(markup=Sum(F("unblended_cost") * decimal.Decimal(0.1)))
            expected_markup = expected_markup.get("markup")

            summary_entry = summary_table.objects.all().aggregate(Min("usage_start"), Max("usage_start"))
            start_date = summary_entry["usage_start__min"]
            end_date = summary_entry["usage_start__max"]

        self.accessor.populate_markup_cost(0.1, start_date, end_date, None)
        with schema_context(self.schema):
            query = self.accessor._get_db_obj_query(summary_table_name).aggregate(Sum("markup_cost"))
            actual_markup = query.get("markup_cost__sum")
            self.assertAlmostEqual(actual_markup, expected_markup, 6) 
Example #11
Source File: tests.py    From django-sqlserver with MIT License
def test_query_annotation(self):
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt) 
Example #12
Source File: 0244_message_copy_pub_date_to_date_sent.py    From zulip with Apache License 2.0
def copy_pub_date_to_date_sent(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    Message = apps.get_model('zerver', 'Message')
    if not Message.objects.exists():
        # Nothing to do
        return

    first_uncopied_id = Message.objects.filter(date_sent__isnull=True,
                                               ).aggregate(Min('id'))['id__min']
    # Note: the below id can fall in a segment
    # where date_sent = pub_date already, but it's not a big problem
    # this will just do some redundant UPDATEs.
    last_id = Message.objects.latest("id").id

    id_range_lower_bound = first_uncopied_id
    id_range_upper_bound = first_uncopied_id + BATCH_SIZE
    while id_range_upper_bound <= last_id:
        sql_copy_pub_date_to_date_sent(id_range_lower_bound, id_range_upper_bound)
        id_range_lower_bound = id_range_upper_bound + 1
        id_range_upper_bound = id_range_lower_bound + BATCH_SIZE
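        # Brief pause between batches so the migration does not monopolize the database.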
        time.sleep(0.1)

    if last_id > id_range_lower_bound:
        # Copy for the last batch.
        sql_copy_pub_date_to_date_sent(id_range_lower_bound, last_id) 
Example #13
Source File: 0239_usermessage_copy_id_to_bigint_id.py    From zulip with Apache License 2.0
def copy_id_to_bigid(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    UserMessage = apps.get_model('zerver', 'UserMessage')
    if not UserMessage.objects.exists():
        # Nothing to do
        return

    # TODO: is the below lookup fast enough, considering there's no index on bigint_id?
    first_uncopied_id = UserMessage.objects.filter(bigint_id__isnull=True,
                                                   ).aggregate(Min('id'))['id__min']
    # Note: the below id can fall in a segment
    # where bigint_id = id already, but it's not a big problem
    # this will just do some redundant UPDATEs.
    last_id = UserMessage.objects.latest("id").id

    id_range_lower_bound = first_uncopied_id
    id_range_upper_bound = first_uncopied_id + BATCH_SIZE
    while id_range_upper_bound <= last_id:
        sql_copy_id_to_bigint_id(id_range_lower_bound, id_range_upper_bound)
        id_range_lower_bound = id_range_upper_bound + 1
        id_range_upper_bound = id_range_lower_bound + BATCH_SIZE
        time.sleep(0.1)

    if last_id > id_range_lower_bound:
        # Copy for the last batch.
        sql_copy_id_to_bigint_id(id_range_lower_bound, last_id) 
Example #14
Source File: models.py    From scale with Apache License 2.0
def get_queue_status(self):
        """Returns the current status of the queue with statistics broken down by job type.

        :returns: A list of each job type with calculated statistics.
        :rtype: list[:class:`queue.models.QueueStatus`]
        """

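        # Group queue entries by their job type fields, then attach per-type statistics.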
        status_dicts = Queue.objects.values(*['job_type__%s' % f for f in JobType.BASE_FIELDS])
        status_dicts = status_dicts.annotate(count=models.Count('job_type'), longest_queued=models.Min('queued'),
                                             highest_priority=models.Min('priority'))
        status_dicts = status_dicts.order_by('job_type__is_paused', 'highest_priority', 'longest_queued')

        # Convert each result to a real job type model with added statistics
        results = []
        for status_dict in status_dicts:
            job_type_dict = {f: status_dict['job_type__%s' % f] for f in JobType.BASE_FIELDS}
            job_type = JobType(**job_type_dict)

            status = QueueStatus(job_type, status_dict['count'], status_dict['longest_queued'],
                                 status_dict['highest_priority'])
            results.append(status)
        return results 
Example #15
Source File: math.py    From open-context-py with GNU General Public License v3.0
def get_numeric_range(self, predicate_uuids, children_ok=True):
        if not isinstance(predicate_uuids, list):
            predicate_uuids = [str(predicate_uuids)]
        sum_ass = Assertion.objects\
                           .filter(predicate_uuid__in=predicate_uuids)\
                           .aggregate(Min('data_num'),
                                      Max('data_num'),
                                      Count('hash_id'),
                                      Avg('data_num'))
        output = {}
        output['avg'] = sum_ass['data_num__avg']
        output['min'] = sum_ass['data_num__min']
        output['max'] = sum_ass['data_num__max']
        output['count'] = sum_ass['hash_id__count']
        if output['count'] == 0 and children_ok:
            output = self.get_numeric_range_from_children(predicate_uuids)
        return output 
Example #16
Source File: views.py    From osler with GNU General Public License v3.0
def inactive_ai_patients_filter(qs):
    '''Build a queryset of patients for those that have active action
    items due in the future.
    '''

    future_ai_pts = coremodels.Patient.objects.filter(
        actionitem__in=coremodels.ActionItem.objects
            .filter(due_date__gt=django.utils.timezone.now().date())
            .filter(completion_date=None)
            .select_related('patient')
        ).annotate(soonest_due_date=Min('actionitem__due_date'))

    future_referral_pts = coremodels.Patient.objects.filter(
        followuprequest__in=referrals.FollowupRequest.objects
            .filter(due_date__gt=django.utils.timezone.now().date())
            .filter(completion_date=None)
            .select_related('patient')
        ).annotate(soonest_due_date=Min('followuprequest__due_date'))

    out_list = merge_pt_querysets_by_soonest_date(future_ai_pts, future_referral_pts)

    return out_list 
Example #17
Source File: user.py    From online-judge with GNU Affero General Public License v3.0
def get_context_data(self, **kwargs):
        context = super(UserPage, self).get_context_data(**kwargs)

        context['hide_solved'] = int(self.hide_solved)
        context['authored'] = self.object.authored_problems.filter(is_public=True, is_organization_private=False) \
                                  .order_by('code')
        rating = self.object.ratings.order_by('-contest__end_time')[:1]
        context['rating'] = rating[0] if rating else None

        context['rank'] = Profile.objects.filter(
            is_unlisted=False, performance_points__gt=self.object.performance_points,
        ).count() + 1

        if rating:
            context['rating_rank'] = Profile.objects.filter(
                is_unlisted=False, rating__gt=self.object.rating,
            ).count() + 1
            context['rated_users'] = Profile.objects.filter(is_unlisted=False, rating__isnull=False).count()
        context.update(self.object.ratings.aggregate(min_rating=Min('rating'), max_rating=Max('rating'),
                                                     contests=Count('contest')))
        return context 
Example #18
Source File: views.py    From pythonic-news with GNU Affero General Public License v3.0
def threads(request):
    page = int(request.GET.get('p', 0))
    paging_size = settings.PAGING_SIZE
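    # For each (comment tree, user) pair, find the user's minimum nesting level, so only
    # the topmost comment the user made in each thread is selected below.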
    tree = Comment.objects.filter( tree_id=OuterRef('tree_id'), user=OuterRef('user')).values('tree_id', 'user__pk').annotate(min_level=Min('level')).order_by()
    stories = Comment.objects.filter(
        user=request.user
    ).filter(
        Q(level__in=Subquery(tree.values('min_level'), output_field=models.IntegerField()))  # TODO: level= or level__in= ???
    ).select_related(
        'user', 'parent', 'to_story'
    ).order_by(
        '-created_at'
    )[(page*paging_size):(page+1)*(paging_size)]
    if len(stories) < 1 and page != 0:
        back = _one_page_back(request)
        if back:
            return back
    return render(request, 'news/index.html', {'stories': stories, 'hide_text':False, 'page': page, 'rank_start': None, 'show_children': True}) 
Example #19
Source File: 0004_make_focal_point_key_not_nullable.py    From wagtail with BSD 3-Clause "New" or "Revised" License
def remove_duplicate_renditions(apps, schema_editor):
    Rendition = apps.get_model('wagtailimages.Rendition')

    # Find all filter_id / image_id pairings that appear multiple times in the renditions table
    # with focal_point_key = NULL
    duplicates = (
        Rendition.objects.filter(focal_point_key__isnull=True).
        values('image_id', 'filter_id').
        annotate(count_id=models.Count('id'), min_id=models.Min('id')).
        filter(count_id__gt=1)
    )

    # Delete all occurrences of those pairings, except for the one with the lowest ID
    for duplicate in duplicates:
        Rendition.objects.filter(
            focal_point_key__isnull=True,
            image=duplicate['image_id'],
            filter=duplicate['filter_id']
        ).exclude(
            id=duplicate['min_id']
        ).delete() 
Example #20
Source File: timetable.py    From conf_site with MIT License
def __iter__(self):
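        # Collect every distinct slot boundary (starts and ends) in chronological order.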
        times = sorted(
            set(itertools.chain(*self.slots_qs().values_list("start", "end")))
        )
        slots = Slot.objects.filter(pk__in=self.slots_qs().values("pk"))
        slots = slots.annotate(
            room_count=Count("slotroom"), order=Min("slotroom__room__order")
        )
        slots = slots.order_by("start", "order")
        row = []
        for time, next_time in pairwise(times):
            row = {"time": time, "slots": []}
            for slot in slots:
                if slot.start == time:
                    slot.rowspan = TimeTable.rowspan(
                        times, slot.start, slot.end
                    )
                    slot.colspan = slot.room_count
                    row["slots"].append(slot)
            if row["slots"] or next_time is None:
                yield row 
Example #21
Source File: data_log.py    From swarfarm with Apache License 2.0
def get_context_data(self, **kwargs):
        if self.get_log_count():
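            # Bucket total damage into fixed-width bins spanning the observed min..max range.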
            bin_width = 50000
            damage_stats = self.get_queryset().aggregate(min=Min('total_damage'), max=Max('total_damage'))
            bin_start = floor_to_nearest(damage_stats['min'], bin_width)
            bin_end = ceil_to_nearest(damage_stats['max'], bin_width)
            damage_histogram = {
                'type': 'histogram',
                'width': bin_width,
                'data': histogram(self.get_queryset(), 'total_damage', range(bin_start, bin_end, bin_width)),
            }
        else:
            damage_histogram = None

        context = {
            'dungeon': self.get_dungeon(),
            'level': self.get_level(),
            'report': drop_report(self.get_queryset(), min_count=0),
            'damage_histogram': damage_histogram
        }

        context.update(kwargs)
        return super().get_context_data(**context) 
Example #22
Source File: legacy_ioi.py    From online-judge with GNU Affero General Public License v3.0
def update_participation(self, participation):
        cumtime = 0
        score = 0
        format_data = {}

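        # For each problem, keep only the submissions matching the best score, then
        # take the earliest date among them.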
        queryset = (participation.submissions.values('problem_id')
                                             .filter(points=Subquery(
                                                 participation.submissions.filter(problem_id=OuterRef('problem_id'))
                                                                          .order_by('-points').values('points')[:1]))
                                             .annotate(time=Min('submission__date'))
                                             .values_list('problem_id', 'time', 'points'))

        for problem_id, time, points in queryset:
            if self.config['cumtime']:
                dt = (time - participation.start).total_seconds()
                if points:
                    cumtime += dt
            else:
                dt = 0

            format_data[str(problem_id)] = {'points': points, 'time': dt}
            score += points

        participation.cumtime = max(cumtime, 0)
        participation.score = score
        participation.tiebreaker = 0
        participation.format_data = format_data
        participation.save() 
Example #23
Source File: models.py    From GloboNetworkAPI with Apache License 2.0
def get_expect_strings(cls):
        try:
            query = (HealthcheckExpect.objects.values('expect_string')
                     .annotate(id=models.Min('id')))

            return list(query)

        except ObjectDoesNotExist as e:
            cls.log.error(u'Healthchecks Does Not Exists.')
            raise HealthcheckExpectNotFoundError(
                e, u'Erro ao pequisar Healthcheks_expects'
            ) 
Example #24
Source File: prunenotifications.py    From healthchecks with BSD 3-Clause "New" or "Revised" License
def handle(self, *args, **options):
        total = 0

        q = Check.objects.filter(n_pings__gt=100)
        q = q.annotate(min_ping_date=Min("ping__created"))
        for check in q:
            qq = Notification.objects.filter(
                owner_id=check.id, created__lt=check.min_ping_date
            )

            num_deleted, _ = qq.delete()
            total += num_deleted

        return "Done! Pruned %d notifications." % total 
Example #25
Source File: data_log.py    From swarfarm with Apache License 2.0
def get_grade_statistics(self):
        return super().get_queryset().aggregate(min=Min('grade'), avg=Avg('grade'), max=Max('grade')) 
Example #26
Source File: tests.py    From django-sqlserver with MIT License
def test_annotate_with_aggregation_in_value(self):
        self.assertQuerysetEqual(
            CaseTestModel.objects.values(*self.non_lob_fields).annotate(
                min=Min('fk_rel__integer'),
                max=Max('fk_rel__integer'),
            ).annotate(
                test=Case(
                    When(integer=2, then='min'),
                    When(integer=3, then='max'),
                ),
            ).order_by('pk'),
            [(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)],
            transform=itemgetter('integer', 'test', 'min', 'max')
        ) 
Example #27
Source File: abstract_models.py    From django-oscar-wagtail with MIT License
def get_root_nodes(cls):
        content_type = ContentType.objects.get_for_model(cls)
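        # The shallowest pages of this content type are treated as root nodes.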
        depth = (
            cls.objects
            .filter(content_type=content_type)
            .aggregate(depth=models.Min('depth')))['depth']

        if depth is not None:
            return cls.objects.filter(content_type=content_type, depth=depth)
        return cls.objects.filter(content_type=content_type) 
Example #28
Source File: views.py    From cartoview with BSD 2-Clause "Simplified" License
def move_down(request, app_id):
    app = App.objects.get(id=app_id)
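    # Find the app with the smallest order greater than this one's, then swap orders.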
    next_app = App.objects.get(
        order=App.objects.filter(
            order__gt=app.order).aggregate(Min('order'))['order__min'])
    order = app.order
    app.order = next_app.order
    next_app.order = order
    app.save()
    next_app.save()
    return HttpResponse(
        json.dumps({
            "success": True
        }), content_type="application/json") 
Example #29
Source File: math.py    From open-context-py with GNU General Public License v3.0 5 votes vote down vote up
def get_date_range(self, predicate_uuids):
        if not isinstance(predicate_uuids, list):
            predicate_uuids = [str(predicate_uuids)]
        sum_ass = Assertion.objects\
                           .filter(predicate_uuid__in=predicate_uuids)\
                           .aggregate(Min('data_date'),
                                      Max('data_date'),
                                      Count('hash_id'))
        output = {}
        output['min'] = sum_ass['data_date__min']
        output['max'] = sum_ass['data_date__max']
        output['count'] = sum_ass['hash_id__count']
        return output 
Example #30
Source File: geochrono.py    From open-context-py with GNU General Public License v3.0
def get_project_date_range_db(self, project_uuid):
        """ gets a list of parent items """
        project_uuids = self.add_child_uuids_of_projects(project_uuid)
        date_range = Event.objects\
                          .filter(project_uuid__in=project_uuids)\
                          .aggregate(Min('start'),
                                     Max('stop'),
                                     Count('hash_id'))
        if date_range['start__min'] is None or date_range['stop__max'] is None:
            date_range = None
        else:
            date_range['start'] = float(date_range['start__min'])
            date_range['stop'] = float(date_range['stop__max'])
        return date_range