Python sqlalchemy.sql.func.min() Examples

The following are 10 code examples of sqlalchemy.sql.func.min(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module sqlalchemy.sql.func, or try the search function.
Example #1
Source File: test_overview.py    From marcotti with MIT License 6 votes vote down vote up
def test_timezone_insert(session):
    """Timezone 001: Insert timezone records into Timezones table and verify data."""
    records = [
        mco.Timezones(name=u"Europe/Paris", offset=1, confederation=enums.ConfederationType.europe),
        mco.Timezones(name=u"America/New_York", offset=-5.0, confederation=enums.ConfederationType.north_america),
        mco.Timezones(name=u"Asia/Kathmandu", offset=+5.75, confederation=enums.ConfederationType.asia),
    ]
    session.add_all(records)

    # Direct lookup by confederation.
    europe_tz = session.query(mco.Timezones).filter_by(confederation=enums.ConfederationType.europe).one()
    assert repr(europe_tz) == "<Timezone(name=Europe/Paris, offset=+1.00, confederation=UEFA)>"

    # Westernmost zone has the smallest UTC offset.
    west_subq = session.query(func.min(mco.Timezones.offset).label('far_west')).subquery()
    westmost = session.query(mco.Timezones).filter(mco.Timezones.offset == west_subq.c.far_west).one()
    assert repr(westmost) == "<Timezone(name=America/New_York, offset=-5.00, confederation=CONCACAF)>"

    # Easternmost zone has the largest UTC offset.
    east_subq = session.query(func.max(mco.Timezones.offset).label('far_east')).subquery()
    eastmost = session.query(mco.Timezones).filter(mco.Timezones.offset == east_subq.c.far_east).one()
    assert repr(eastmost) == "<Timezone(name=Asia/Kathmandu, offset=+5.75, confederation=AFC)>"
Example #2
Source File: surveys.py    From dokomoforms with GNU General Public License v3.0 6 votes vote down vote up
def stats(self, survey_id):
        """Get stats for a survey.

        Returns a dict with the survey's creation time, the earliest and
        latest submission save times, and the total submission count.
        """
        created, earliest, latest, num = (
            self.session
            .query(
                func.max(Survey.created_on),
                func.min(Submission.save_time),
                func.max(Submission.save_time),
                func.count(Submission.id),
            )
            .select_from(Submission)
            .join(Survey)
            # TODO: ask @jmwohl what this line is supposed to do
            # .filter(User.id == self.current_user_model.id)
            .filter(Submission.survey_id == survey_id)
            .one()
        )
        return {
            "created_on": created,
            "earliest_submission_time": earliest,
            "latest_submission_time": latest,
            "num_submissions": num,
        }
Example #3
Source File: base.py    From mma-dexter with Apache License 2.0 6 votes vote down vote up
def _calculate_date_range(self):
        """
        Determine the publication-date range of the documents.

        When start_date/end_date were not supplied, derive them from the
        min/max ``published_at`` of the documents in ``doc_ids``; finally
        compute the span in days (at least 1).
        """
        if not (self.start_date and self.end_date):
            if self.doc_ids is None:
                raise ValueError("Need either doc_ids, or both start_date and end_date")

            bounds = db.session.query(
                func.min(Document.published_at),
                func.max(Document.published_at))\
                .filter(Document.id.in_(self.doc_ids))\
                .first()

            if bounds and bounds[0]:
                self.start_date, self.end_date = bounds[0].date(), bounds[1].date()
            else:
                # No matching documents: collapse the range to "now".
                self.start_date = self.end_date = datetime.utcnow()

        # Clamp to at least one day so per-day rates never divide by zero.
        self.days = max((self.end_date - self.start_date).days, 1)
Example #4
Source File: rule.py    From rucio with Apache License 2.0 5 votes vote down vote up
def get_evaluation_backlog(session=None):
    """
    Counts the number of entries in the rule evaluation backlog.
    (Number of files to be evaluated)

    :returns:     Tuple (Count, Datetime of oldest entry)
    """

    # Serve from the cache (10-minute TTL) when a value is present.
    cached = REGION.get('rule_evaluation_backlog', expiration_time=600)
    if cached is not NO_VALUE:
        return cached

    backlog = session.query(func.count(models.UpdatedDID.created_at),
                            func.min(models.UpdatedDID.created_at)).one()
    REGION.set('rule_evaluation_backlog', backlog)
    return backlog
Example #5
Source File: integrity.py    From sqlalchemy_mptt with MIT License 5 votes vote down vote up
def test_lowest_left_is_always_1(self):
        """The smallest ``left`` key in the tree must always be 1.

        Equivalent SQL:

        .. code-block:: sql

            SELECT MIN(left) FROM tree
        """
        lowest_left = self.session.query(func.min(self.model.left)).scalar()
        self.assertEqual(lowest_left, 1)
Example #6
Source File: route_stop.py    From gtfsdb with Mozilla Public License 2.0 5 votes vote down vote up
def _find_route_stop_effective_dates(cls, session, route_id):
        """
        Find the effective start and end dates for all stops of the input
        route, by querying the trip and stop-time tables against the
        universal calendar.  Equivalent pure SQL:

        # query all route stops with start & end dates
        SELECT t.route_id, st.stop_id, min(date), max(date)
        FROM ott.universal_calendar u, ott.trips t, ott.stop_times st
        WHERE t.service_id = u.service_id
          AND t.trip_id    = st.trip_id
        GROUP BY t.route_id, st.stop_id

        # query all stops start & end dates for a given route (used below in SQLAlchemy)
        SELECT st.stop_id, min(date), max(date)
        FROM ott.universal_calendar u, ott.trips t, ott.stop_times st
        WHERE t.service_id = u.service_id
          AND t.trip_id    = st.trip_id
          AND st.stop_id   = '1'
        GROUP BY st.stop_id

        :return hash table with stop_id as key, and tuple of (stop_id, start_date, end_date) for all route stops
        """
        # step 1: query the route/stop start and end dates, based on stop time table
        from gtfsdb import UniversalCalendar, StopTime, Trip
        date_query = (
            session.query(StopTime.stop_id,
                          func.min(UniversalCalendar.date),
                          func.max(UniversalCalendar.date))
            .filter(UniversalCalendar.service_id == Trip.service_id)
            .filter(Trip.trip_id == StopTime.trip_id)
            .filter(Trip.route_id == route_id)
            .group_by(StopTime.stop_id)
        )

        # step 2: key each (stop_id, start_date, end_date) row by its stop id
        return {row[0]: row for row in date_query.all()}
Example #7
Source File: route.py    From gtfsdb with Mozilla Public License 2.0 5 votes vote down vote up
def _get_start_end_dates(self):
        """find the min & max date using Trip & UniversalCalendar"""
        if not self.is_cached_data_valid('_start_date'):
            from gtfsdb.model.calendar import UniversalCalendar
            bounds = self.session.query(func.min(UniversalCalendar.date),
                                        func.max(UniversalCalendar.date))
            bounds = bounds.filter(UniversalCalendar.trips.any(route_id=self.route_id))
            self._start_date, self._end_date = bounds.one()
            self.update_cached_data('_start_date')

        return self._start_date, self._end_date
Example #8
Source File: pysql.py    From spider163 with MIT License 5 votes vote down vote up
def random_data():
    """Return 12 comment/song records sampled at random id offsets."""
    id_min, id_max = settings.Session.query(func.min(Comment163.id), func.max(Comment163.id)).all()[0]
    results = []
    for _ in range(12):
        pivot = random.uniform(id_min, id_max)
        # NOTE(review): SQL built by string concatenation — pivot is an
        # internally generated float, but parameterized queries would be safer.
        row = settings.engine.execute("select txt,liked,a.author,song_name,a.song_id,b.author from comment163 a inner join music163 b on a.song_id= b.song_id where a.id>" + str(pivot) + " limit 1").fetchone()
        results.append({"txt": row[0], "like": row[1], "author": row[2], "song": {"name": row[3], "author": row[5], "id": row[4]}})
    return results
Example #9
Source File: strava.py    From openmoves with MIT License 4 votes vote down vote up
def derive_move_infos_from_samples(move, samples):
    """Populate aggregate statistics on *move* from its recorded samples.

    Writes (in place on *move*): ``log_item_count``, ``altitude_min``/``_max``,
    total ``ascent``/``descent`` with ``ascent_time``/``descent_time``,
    ``temperature_min``/``_max`` and ``hr_min``/``_max``.  No-op when
    *samples* is empty.

    :param move: move model object whose aggregate fields are assigned
    :param samples: ordered samples with ``altitude``, ``time``,
        ``temperature`` and ``hr`` attributes (any may be ``None``)
    """
    if not samples:
        return

    move.log_item_count = len(samples)

    # Altitudes
    altitudes = np.asarray([sample.altitude for sample in samples if sample.altitude is not None], dtype=float)
    if len(altitudes) > 0:
        move.altitude_min = np.min(altitudes)
        move.altitude_max = np.max(altitudes)

        # Total ascent / descent, accumulated pairwise below
        move.ascent = 0
        move.ascent_time = timedelta(0)
        move.descent = 0
        move.descent_time = timedelta(0)

    previous_sample = None

    # Accumulate values from consecutive sample pairs
    for sample in samples:
        # Skip calculation on first sample and on pairs missing altitude info
        if previous_sample:
            if sample.altitude is not None and previous_sample.altitude is not None:
                altitude_diff = sample.altitude - previous_sample.altitude
                time_diff = sample.time - previous_sample.time
                if altitude_diff > 0:
                    move.ascent += altitude_diff
                    move.ascent_time += time_diff
                elif altitude_diff < 0:
                    move.descent += -altitude_diff
                    move.descent_time += time_diff

        # Store sample for next cycle
        previous_sample = sample

    # Temperature: compare against None explicitly — the previous truthiness
    # test silently discarded valid 0-degree readings from min/max.
    temperatures = np.asarray([sample.temperature for sample in samples if sample.temperature is not None], dtype=float)
    if len(temperatures) > 0:
        move.temperature_min = np.min(temperatures)
        move.temperature_max = np.max(temperatures)

    # Heart rate: truthiness filter kept — hr of 0 appears to encode "no
    # reading" here rather than a real measurement (TODO confirm).
    hrs = np.asarray([sample.hr for sample in samples if sample.hr], dtype=float)
    if len(hrs) > 0:
        move.hr_min = np.min(hrs)
        move.hr_max = np.max(hrs)
Example #10
Source File: strava.py    From openmoves with MIT License 4 votes vote down vote up
def associate_activities(user, before=None, after=None):
    """Match the user's Strava activities against locally stored moves.

    Fetches activities from the Strava client (optionally bounded by
    *before*/*after*) and, for each one, tries to find the corresponding
    move: first by a previously stored strava_activity_id, then by exact
    start time, then by fuzzy time matching.  Newly matched pairs are
    persisted (strava_activity_id written to the move and committed).

    Returns a tuple of three lists:
    (associated activities as (activity, move) pairs,
     already-known activities,
     activities with no matching move).
    """
    assert user.has_strava()
    # Map each move's earliest sample timestamp (UTC-aware) -> move id.
    # `!= None` is the SQLAlchemy column-expression idiom, not a style slip.
    moves_by_date_time = {}
    for id, date_time in db.session.query(Sample.move_id, func.min(Sample.utc)) \
            .join(Move) \
            .filter(Sample.utc != None) \
            .filter(Move.user == user) \
            .group_by(Sample.move_id):
        utc = date_time.replace(tzinfo=pytz.UTC)
        moves_by_date_time[utc] = id
    # Map already-associated Strava activity ids -> move id.
    moves_by_strava_activity_id = {}
    for id, strava_activity_id in db.session.query(Move.id, Move.strava_activity_id) \
            .filter(Move.user == user) \
            .filter(Move.strava_activity_id != None):
        moves_by_strava_activity_id[strava_activity_id] = id
    new_strava_activities = []
    associated_strava_activities = []
    known_strava_activities = []
    client = get_strava_client(user)
    for activity in client.get_activities(before=before, after=after):
        move_id = None
        start_date = activity.start_date
        if activity.id in moves_by_strava_activity_id:
            # Already associated in a previous run.
            move_id = moves_by_strava_activity_id[activity.id]
        elif start_date in moves_by_date_time:
            # Exact timestamp match.
            move_id = moves_by_date_time[start_date]
        else:
            # Fuzzy match, pass 1: within 30 seconds of either the UTC start
            # or the local start (treated as UTC to catch timezone mix-ups).
            for date_time in moves_by_date_time.keys():
                start_date_delta = abs(date_time - start_date)
                start_date_local_delta = abs(date_time - activity.start_date_local.replace(tzinfo=pytz.UTC))
                max_delta = timedelta(seconds=30)

                if start_date_delta <= max_delta or start_date_local_delta <= max_delta:
                    move_id = moves_by_date_time[date_time]
                    break

            if not move_id:
                # Fuzzy match, pass 2: wider MAX_DATE_TIME_OFFSET window,
                # accepted only when it yields exactly one candidate.
                # NOTE(review): start_date_delta is an abs() and thus never
                # negative, so the -MAX_DATE_TIME_OFFSET lower bound is
                # redundant (harmless).
                potential_moves = []
                for date_time in moves_by_date_time.keys():
                    start_date_delta = abs(date_time - start_date)
                    if -MAX_DATE_TIME_OFFSET <= start_date_delta <= MAX_DATE_TIME_OFFSET:
                        potential_moves.append(moves_by_date_time[date_time])

                if len(potential_moves) == 1:
                    move_id = potential_moves[0]
                elif len(potential_moves) > 1:
                    # too many candidates found
                    pass

        # Classify the activity and persist new associations.
        if not move_id:
            new_strava_activities.append(activity)
        elif activity.id in moves_by_strava_activity_id:
            known_strava_activities.append(activity)
        else:
            move = Move.query.filter_by(id=move_id).one()
            move.strava_activity_id = activity.id
            db.session.commit()
            associated_strava_activities.append((activity, move))

    return associated_strava_activities, known_strava_activities, new_strava_activities