Python sqlalchemy.sql.expression.asc() Examples

The following are 9 code examples of sqlalchemy.sql.expression.asc(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module sqlalchemy.sql.expression, or try the search function.
Example #1
Source File: ordering.py    From babbage with MIT License 6 votes vote down vote up
def apply(self, q, bindings, ordering, distinct=None):
        """Apply ORDER BY clauses for each parsed (ref, direction) pair,
        in the order in which they were submitted.

        Returns the applied (ref, direction) pairs, the modified query and
        the (possibly extended) list of bindings.
        """
        applied = []
        for (ref, direction) in self.parse(ordering):
            applied.append((ref, direction))
            table, col = self.cube.model[ref].bind(self.cube)
            if distinct is None or distinct == ref:
                # Sort on the bound, labelled column and register its table.
                col = col.label(col.name)
                if direction == 'asc':
                    col = col.asc()
                else:
                    col = col.desc()
                bindings.append(Binding(table, ref))
            else:
                # Sorting on a ref other than the distinct one: order by
                # the ref's label instead of binding the column.
                if direction == 'asc':
                    col = asc(ref)
                else:
                    col = desc(ref)
            if self.cube.is_postgresql:
                col = col.nullslast()
            q = q.order_by(col)

        if not self.results:
            # No explicit criteria were parsed: sort every result column
            # ascending to make the output deterministic.
            for col in q.columns:
                col = col.asc()
                if self.cube.is_postgresql:
                    col = col.nullslast()
                q = q.order_by(col)
        return applied, q, bindings
Example #2
Source File: repo_status.py    From gitmostwanted.com with MIT License 6 votes vote down vote up
def status_detect(num_days, num_segments):
    """Re-evaluate every repo with an 'unknown' status.

    For each such repo, fetch up to ``num_days`` of star history (oldest
    first), compute its mean score and flip the status to 'hopeless' or
    'promising'; the computed mean is persisted as a RepoMean row.
    """
    for repo in Repo.query.filter(Repo.status == 'unknown'):
        stars_history = db.session.query(RepoStars.day, RepoStars.stars)\
            .filter(RepoStars.repo_id == repo.id)\
            .order_by(expression.asc(RepoStars.day))\
            .limit(num_days)\
            .all()

        if stars_history:
            val = repo_mean(stars_history, num_days, num_segments,
                            last_known_mean(repo.id))
        else:
            # No star history at all: treat the score as zero.
            val = 0

        previous_status = repo.status
        repo.status = 'hopeless' if val < 1 else 'promising'

        log.info(
            'Repository status of {0}({1}) has been changed to {2} (was: {3})'
            .format(repo.id, repo.full_name, repo.status, previous_status)
        )

        db.session.merge(
            RepoMean(repo=repo, value=val, created_at=datetime.today().strftime('%Y-%m-%d'))
        )
        db.session.commit()
Example #3
Source File: ordering.py    From babbage with MIT License 5 votes vote down vote up
def order(self, ast):
        """Record one sorting criterion from the parsed AST.

        A bare string means ascending order on that ref; otherwise the
        ref and direction are taken from positions 0 and 2 of the AST.
        Raises QueryException when the ref is not part of the cube model.
        """
        if not isinstance(ast, six.string_types):
            ref = ast[0]
            direction = ast[2]
        else:
            ref = ast
            direction = 'asc'
        if ref not in self.cube.model:
            raise QueryException('Invalid sorting criterion: %r' % ast)
        self.results.append((ref, direction))
Example #4
Source File: dialect.py    From sqlalchemy-teradata with MIT License 5 votes vote down vote up
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """
        Override: reflect the primary-key constraint from dbc.Indices.
        TODO: Check if we need PRIMARY Indices or PRIMARY KEY Indices
        TODO: Check for border cases (No PK Indices)
        """
        if schema is None:
            schema = self.default_schema_name

        stmt = select([column('ColumnName'), column('IndexName')],
                      from_obj=[text('dbc.Indices')]) \
            .where(and_(text('DatabaseName = :schema'),
                        text('TableName=:table'),
                        text('IndexType=:indextype'))) \
            .order_by(asc(column('IndexNumber')))

        # IndexType 'K' marks primary-key indices.
        rows = connection.execute(stmt, schema=schema, table=table_name,
                                  indextype='K').fetchall()

        constrained = []
        pk_name = None
        for row in rows:
            constrained.append(self.normalize_name(row['ColumnName']))
            # There should be just one IndexName across all rows.
            pk_name = self.normalize_name(row['IndexName'])

        return {
            "constrained_columns": constrained,
            "name": pk_name
        }
Example #5
Source File: dialect.py    From sqlalchemy-teradata with MIT License 5 votes vote down vote up
def get_unique_constraints(self, connection, table_name, schema=None, **kw):
        """
        Overrides base class method: reflect unique constraints from dbc.Indices.
        """
        if schema is None:
            schema = self.default_schema_name

        stmt = select([column('ColumnName'), column('IndexName')],
                      from_obj=[text('dbc.Indices')]) \
            .where(and_(text('DatabaseName = :schema'),
                        text('TableName=:table'),
                        text('IndexType=:indextype'))) \
            .order_by(asc(column('IndexName')))

        # IndexType 'U' marks unique indices.
        rows = connection.execute(stmt, schema=schema, table=table_name,
                                  indextype='U').fetchall()

        def key_of(row):
            # Rows are grouped per index name; the ORDER BY above keeps
            # equal keys adjacent, as groupby requires.
            return {
                'name': self.normalize_name(row['IndexName']),
            }

        constraints = []
        for info, members in groupby(rows, key_of):
            constraints.append({
                'name': self.normalize_name(info['name']),
                'column_names': [self.normalize_name(m['ColumnName'])
                                 for m in members],
            })
        return constraints
Example #6
Source File: dialect.py    From sqlalchemy-teradata with MIT License 5 votes vote down vote up
def get_indexes(self, connection, table_name, schema=None, **kw):
        """
        Overrides base class method: reflect all indices on the table
        from dbc.Indices.
        """
        if schema is None:
            schema = self.default_schema_name

        stmt = select(["*"], from_obj=[text('dbc.Indices')]) \
            .where(and_(text('DatabaseName = :schema'),
                        text('TableName=:table'))) \
            .order_by(asc(column('IndexName')))

        rows = connection.execute(stmt, schema=schema, table=table_name).fetchall()

        def key_of(row):
            # Rows are grouped per (name, uniqueness); the ORDER BY above
            # keeps equal keys adjacent, as groupby requires.
            return {
                'name': row.IndexName or row.IndexNumber,  # If IndexName is None TODO: Check what to do
                'unique': row.UniqueFlag == 'Y'
            }

        # TODO: Check if there's a better way
        indices = []
        for info, members in groupby(rows, key_of):
            indices.append({
                'name': info['name'],
                'column_names': [self.normalize_name(m['ColumnName'])
                                 for m in members],
                'unique': info['unique']
            })
        return indices
Example #7
Source File: condition.py    From dodotable with MIT License 5 votes vote down vote up
def __query__(self):
        """Build the ordering expression for this condition.

        :returns: a SQLAlchemy ordering clause -- ``desc(...)`` or
                  ``asc(...)`` over ``self.attribute``, depending on
                  whether ``self.order`` equals ``DESCENDANT`` or
                  ``ASCENDANT``.
        :raises ValueError: if ``self.order`` matches neither constant.
                 (The original code referenced ``query`` unbound in that
                 case, raising a confusing ``UnboundLocalError``.)
        """
        if self.order == self.DESCENDANT:
            return desc(self.attribute)
        if self.order == self.ASCENDANT:
            return asc(self.attribute)
        # Fail loudly with a descriptive message instead of letting the
        # local variable be referenced before assignment.
        raise ValueError('unknown sort order: %r' % (self.order,))
Example #8
Source File: request.py    From rucio with Apache License 2.0 4 votes vote down vote up
def release_waiting_requests_fifo(rse_id, activity=None, count=None, account=None, direction='destination', session=None):
    """
    Release waiting requests. Transfer requests that were requested first, get released first (FIFO).

    :param rse_id:           The RSE id.
    :param activity:         The activity.
    :param count:            The count to be released.
    :param account:          The account name whose requests to release.
    :param direction:        Direction if requests are grouped by source RSE or destination RSE.
    :param session:          The database session.
    :returns:                Number of requests switched to QUEUED state.
    """

    def _apply_filters(query):
        # Common filtering shared by both dialect branches: restrict by
        # RSE (according to direction), then optionally by activity/account.
        if direction == 'destination':
            query = query.filter(models.Request.dest_rse_id == rse_id)
        elif direction == 'source':
            query = query.filter(models.Request.source_rse_id == rse_id)
        if activity:
            query = query.filter(models.Request.activity == activity)
        if account:
            query = query.filter(models.Request.account == account)
        return query

    base = _apply_filters(
        session.query(models.Request.id)
               .filter(models.Request.state == RequestState.WAITING)
    ).order_by(asc(models.Request.requested_at))

    dialect = session.bind.dialect.name
    if dialect == 'mysql':
        subquery = base.limit(count).subquery()

        # join because IN and LIMIT cannot be used together
        subquery = session.query(models.Request.id)\
                          .join(subquery, models.Request.id == subquery.c.id).subquery()
        # wrap select to update and select from the same table
        subquery = session.query(subquery.c.id).subquery()
    else:
        subquery = base.limit(count)

    return session.query(models.Request)\
                  .filter(models.Request.id.in_(subquery))\
                  .update({'state': RequestState.QUEUED},
                          synchronize_session=False)
Example #9
Source File: dialect.py    From sqlalchemy-teradata with MIT License 4 votes vote down vote up
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """
        Overrides base class method: reflect foreign keys from
        DBC.All_RI_ChildrenV.
        """
        if schema is None:
            schema = self.default_schema_name

        stmt = select([column('IndexID'), column('IndexName'), column('ChildKeyColumn'), column('ParentDB'),
                       column('ParentTable'), column('ParentKeyColumn')],
                      from_obj=[text('DBC.All_RI_ChildrenV')]) \
            .where(and_(text('ChildTable = :table'),
                        text('ChildDB = :schema'))) \
            .order_by(asc(column('IndexID')))

        rows = connection.execute(stmt, schema=schema, table=table_name).fetchall()

        def key_of(row):
            # Rows are grouped per constraint; the ORDER BY above keeps
            # equal keys adjacent, as groupby requires.
            return {
                'name': row.IndexName or row.IndexID,  # ID if IndexName is None
                'schema': row.ParentDB,
                'table': row.ParentTable
            }

        # TODO: Check if there's a better way
        fk_dicts = []
        for info, members in groupby(rows, key_of):
            constrained = []
            referred = []
            for member in members:
                constrained.append(self.normalize_name(member['ChildKeyColumn']))
                referred.append(self.normalize_name(member['ParentKeyColumn']))

            fk_dicts.append({
                'name': info['name'],
                'constrained_columns': constrained,
                'referred_table': info['table'],
                'referred_schema': info['schema'],
                'referred_columns': referred
            })
        return fk_dicts