Python sqlalchemy.sql.expression.true() Examples
The following are 30 code examples of sqlalchemy.sql.expression.true().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module sqlalchemy.sql.expression, or try the search function.
Example #1
Source File: automation.py From eNMS with GNU General Public License v3.0 | 6 votes |
def rbac_filter(cls, query, mode, user):
    # Restrict a service query to rows the given user may access in the given
    # mode: public services, plus services reachable via the user's direct
    # access grants or via one of the user's groups.
    service_alias = aliased(models["service"])
    # Services flagged public are visible to everyone.
    public_services = query.filter(models["service"].public == true())
    # Services granted to the user directly through an access object.
    user_services = (
        query.join(models["service"].originals.of_type(service_alias))
        .join(models["access"], service_alias.access)
        .join(models["user"], models["access"].users)
        .filter(models["access"].services_access.contains(mode))
        .filter(models["user"].name == user.name)
    )
    # Services granted to a group the user belongs to.
    user_group_services = (
        query.join(models["service"].originals.of_type(service_alias))
        .join(models["access"], service_alias.access)
        .join(models["group"], models["access"].groups)
        .join(models["user"], models["group"].users)
        .filter(models["access"].services_access.contains(mode))
        .filter(models["user"].name == user.name)
    )
    return public_services.union(user_services, user_group_services)
Example #2
Source File: automation.py From eNMS with GNU General Public License v3.0 | 6 votes |
def filtering_constraints(cls, **kwargs):
    # Build extra SQLAlchemy filter constraints from the submitted form:
    # either restrict to services inside a given workflow (excluding the
    # built-in Start/End placeholders), or — with parent-filtering on —
    # restrict to top-level services that belong to no workflow.
    workflow_id, constraints = kwargs["form"].get("workflow-filtering"), []
    if workflow_id:
        constraints.extend(
            [
                models["service"].workflows.any(
                    models["workflow"].id == int(workflow_id)
                ),
                # Exclude the synthetic Start/End services of a workflow.
                ~or_(
                    models["service"].scoped_name == name
                    for name in ("Start", "End")
                ),
            ]
        )
    elif kwargs["form"].get("parent-filtering", "true") == "true":
        # Default behaviour: only services not embedded in any workflow.
        constraints.append(~models["service"].workflows.any())
    return constraints
Example #3
Source File: inventory.py From eNMS with GNU General Public License v3.0 | 6 votes |
def rbac_filter(cls, query, mode, user):
    # Restrict a pool query to pools the user may access in the given mode:
    # public pools, direct user grants, and grants via the user's groups.
    public_pools = query.filter(cls.public == true())
    user_pools = (
        query.join(cls.access)
        .join(models["user"], models["access"].users)
        .filter(models["access"].pools_access.contains(mode))
        .filter(models["user"].name == user.name)
    )
    user_group_pools = (
        query.join(cls.access)
        .join(models["group"], models["access"].groups)
        .join(models["user"], models["group"].users)
        .filter(models["access"].pools_access.contains(mode))
        .filter(models["user"].name == user.name)
    )
    return public_pools.union(user_pools, user_group_pools)
Example #4
Source File: inventory.py From eNMS with GNU General Public License v3.0 | 6 votes |
def rbac_filter(cls, query, mode, user):
    # Restrict an inventory-object query to objects the user may access:
    # public objects, plus objects whose pools are granted to the user
    # directly or through one of the user's groups.
    public_objects = query.filter(cls.public == true())
    user_objects = (
        query.join(cls.pools)
        .join(models["access"], models["pool"].access)
        .join(models["user"], models["access"].users)
        .filter(models["access"].pools_access.contains(mode))
        .filter(models["user"].name == user.name)
    )
    user_group_objects = (
        query.join(cls.pools)
        .join(models["access"], models["pool"].access)
        .join(models["group"], models["access"].groups)
        .join(models["user"], models["group"].users)
        .filter(models["access"].pools_access.contains(mode))
        .filter(models["user"].name == user.name)
    )
    return public_objects.union(user_objects, user_group_objects)
Example #5
Source File: scheduling.py From eNMS with GNU General Public License v3.0 | 6 votes |
def rbac_filter(cls, query, mode, user):
    # Restrict a task query to tasks whose underlying service is visible to
    # the user: public services, direct access grants, and group grants.
    # NOTE(review): unlike the other rbac_filter variants, `mode` is not used
    # in the user/group branches here — confirm this is intentional.
    public_tasks = query.join(cls.service).filter(
        models["service"].public == true()
    )
    user_tasks = (
        query.join(cls.service)
        .join(models["access"], models["service"].access)
        .join(models["user"], models["access"].users)
        .filter(models["user"].name == user.name)
    )
    user_group_tasks = (
        query.join(cls.service)
        .join(models["access"], models["service"].access)
        .join(models["group"], models["access"].groups)
        .join(models["user"], models["group"].users)
        .filter(models["user"].name == user.name)
    )
    return public_tasks.union(user_tasks, user_group_tasks)
Example #6
Source File: simulation.py From lpdec with GNU General Public License v3.0 | 6 votes |
def search(what, **conditions):
    """Search the simulation database.

    :param what: Either ``'codename'`` (return code names only) or
        ``'point'`` (return full data points).
    :param conditions: Optional filters; supported keys are ``identifier``
        and ``code``, each mapping to an iterable of accepted values.
    :returns: For ``'point'``, a list of data points built via
        ``dataPointFromRow``; otherwise the raw result rows.
    :raises ValueError: On an unknown ``what`` value or condition key.
    """
    if what == 'codename':
        columns = [db.codesTable.c.name]
    elif what == 'point':
        columns = [simTable.c.identifier, db.codesTable.c.name, db.decodersTable.c.name,
                   simTable.c.channel_json, simTable.c.wordSeed, simTable.c.samples,
                   simTable.c.errors, simTable.c.cputime, simTable.c.date_start,
                   simTable.c.date_end, simTable.c.machine, simTable.c.program_name,
                   simTable.c.program_version, simTable.c.stats]
    else:
        raise ValueError('unknown search: "{}"'.format(what))
    # Start from the SQL TRUE literal and AND on each requested condition.
    condition = expression.true()
    for key, val in conditions.items():
        if key == 'identifier':
            condition &= simTable.c.identifier.in_(val)
        elif key == 'code':
            condition &= db.codesTable.c.name.in_(val)
        else:
            # Fix: include the offending key instead of a bare ValueError().
            raise ValueError('unknown condition key: "{}"'.format(key))
    s = sqla.select(columns, whereclause=condition, from_obj=joinTable,
                    distinct=True, use_labels=True).order_by(db.codesTable.c.name)
    ans = db.engine.execute(s).fetchall()
    if what == 'point':
        return [dataPointFromRow(row) for row in ans]
    # Fix: reuse the rows already fetched above instead of re-executing
    # the query a second time against the database.
    return ans
Example #7
Source File: api.py From manila with Apache License 2.0 | 6 votes |
def _share_group_type_get_query(context, session=None, read_deleted=None,
                                expected_fields=None):
    # Build the base query for share group types, eagerly loading specs and
    # share types. Non-admin contexts only see public types plus types
    # explicitly shared with their project.
    expected_fields = expected_fields or []
    query = model_query(
        context,
        models.ShareGroupTypes,
        session=session,
        read_deleted=read_deleted
    ).options(
        joinedload('group_specs'),
        joinedload('share_types'),
    )

    if 'projects' in expected_fields:
        # Eagerly load project associations only when the caller asked.
        query = query.options(joinedload('projects'))

    if not context.is_admin:
        # Visibility: public OR granted to the caller's project.
        the_filter = [models.ShareGroupTypes.is_public == true()]
        projects_attr = getattr(models.ShareGroupTypes, 'projects')
        the_filter.extend([
            projects_attr.any(project_id=context.project_id)
        ])
        query = query.filter(or_(*the_filter))

    return query
Example #8
Source File: api.py From manila with Apache License 2.0 | 6 votes |
def _share_type_get_query(context, session=None, read_deleted=None,
                          expected_fields=None):
    # Build the base query for share types with extra specs eagerly loaded.
    # Non-admin contexts only see public types plus types granted to their
    # project (mirrors _share_group_type_get_query).
    expected_fields = expected_fields or []
    query = (model_query(context,
                         models.ShareTypes,
                         session=session,
                         read_deleted=read_deleted).
             options(joinedload('extra_specs')))

    if 'projects' in expected_fields:
        query = query.options(joinedload('projects'))

    if not context.is_admin:
        # Visibility: public OR granted to the caller's project.
        the_filter = [models.ShareTypes.is_public == true()]
        projects_attr = getattr(models.ShareTypes, 'projects')
        the_filter.extend([
            projects_attr.any(project_id=context.project_id)
        ])
        query = query.filter(or_(*the_filter))

    return query
Example #9
Source File: rule.py From rucio with Apache License 2.0 | 6 votes |
def release_parent_rule(child_rule_id, remove_parent_expiration=False, session=None):
    """
    Release a potential parent rule, because the child_rule is OK.

    :param child_rule_id:            The child rule id.
    :param remove_parent_expiration: If true, removes the expiration of the parent rule.
    :param session:                  The Database session
    """
    session.flush()

    parent_rules = session.query(models.ReplicationRule).filter_by(child_rule_id=child_rule_id).\
        with_hint(models.ReplicationRule, "index(RULES RULES_CHILD_RULE_ID_IDX)", 'oracle').all()
    for rule in parent_rules:
        if remove_parent_expiration:
            rule.expires_at = None
        # Detach the child so the parent is no longer held back by it.
        rule.child_rule_id = None
        insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
Example #10
Source File: dispatcher.py From backend.ai-manager with GNU Lesser General Public License v3.0 | 5 votes |
async def _list_agents_by_sgroup(
    db_conn: SAConnection,
    sgroup_name: str,
) -> Sequence[AgentContext]:
    """Return contexts for all live, schedulable agents in a scaling group.

    Rows are selected FOR UPDATE so the scheduler holds them locked while
    making placement decisions.

    :param db_conn: Open async SQLAlchemy connection.
    :param sgroup_name: Name of the scaling group to list agents for.
    :returns: A list of ``AgentContext`` tuples.
    """
    # Fix: the body uses ``async for``, so this must be declared
    # ``async def`` — a plain ``def`` is a SyntaxError here.
    query = (
        sa.select([
            agents.c.id,
            agents.c.addr,
            agents.c.scaling_group,
            agents.c.available_slots,
            agents.c.occupied_slots,
        ], for_update=True)
        .select_from(agents)
        .where(
            (agents.c.status == AgentStatus.ALIVE) &
            (agents.c.scaling_group == sgroup_name) &
            (agents.c.schedulable == true())
        )
    )
    items = []
    async for row in db_conn.execute(query):
        item = AgentContext(
            row['id'],
            row['addr'],
            row['scaling_group'],
            row['available_slots'],
            row['occupied_slots'],
        )
        items.append(item)
    return items
Example #11
Source File: 1d9e835a84f9_.py From incubator-superset with Apache License 2.0 | 5 votes |
def upgrade():
    # Alembic migration: add a NOT NULL boolean column to ``dbs`` with a
    # server-side default of TRUE so existing rows get a value.
    op.add_column(
        "dbs",
        sa.Column(
            "allow_csv_upload",
            sa.Boolean(),
            nullable=False,
            server_default=expression.true(),
        ),
    )
Example #12
Source File: models.py From thinkhazard with GNU General Public License v3.0 | 5 votes |
def last_complete_date(cls, dbsession):
    # Return the date of the most recent record marked complete,
    # or None when no complete record exists.
    last_complete = (
        dbsession.query(cls)
        .filter(cls.complete == true())
        .order_by(cls.date.desc())
        .first()
    )
    if last_complete is None:
        return None
    return last_complete.date
Example #13
Source File: dbs.py From banzai with GNU General Public License v3.0 | 5 votes |
def add_or_update_record(db_session, table_model, equivalence_criteria, record_attributes):
    """
    Add a record to the database if it does not exist or update the record if it does exist.

    Parameters
    ----------
    db_session : SQLAlchemy database session
                 session must be active

    table_model : SQLAlchemy Base
                  The class representation of the table of interest

    equivalence_criteria : dict
                           record attributes that need to match for the records to be considered
                           the same

    record_attributes : dict
                        record attributes that will be set/updated

    Returns
    -------
    record : SQLAlchemy Base
             The object representation of the added/updated record

    Notes
    -----
    The added/updated record is added to the database but not committed. You need to call
    db_session.commit() to write the changes to the database.
    """
    # Start from SQLAlchemy's TRUE literal and AND on one equality test per
    # equivalence criterion.
    query = true()
    for key in equivalence_criteria.keys():
        query &= getattr(table_model, key) == equivalence_criteria[key]
    record = db_session.query(table_model).filter(query).first()
    if record is None:
        # No match: create a fresh row (attributes re-applied below).
        record = table_model(**record_attributes)
        db_session.add(record)
    for attribute in record_attributes:
        setattr(record, attribute, record_attributes[attribute])
    return record
Example #14
Source File: place.py From osm-wikidata with GNU General Public License v3.0 | 5 votes |
def matcher_query(self):
    # PlaceItems of this place that still need matching: the item has a
    # Wikidata entity and the work is not flagged done (NULL counts as
    # not done, hence the OR with IS NULL).
    return (PlaceItem.query
            .join(Item)
            .filter(Item.entity.isnot(None),
                    PlaceItem.place == self,
                    or_(PlaceItem.done.is_(None), PlaceItem.done != true()))
            .order_by(PlaceItem.item_id))
Example #15
Source File: place.py From osm-wikidata with GNU General Public License v3.0 | 5 votes |
def reset_all_items_to_not_done(self):
    # Flip every finished PlaceItem of this place back to not-done and
    # commit the change in one transaction.
    place_items = (PlaceItem.query
                   .join(Item)
                   .filter(Item.entity.isnot(None),
                           PlaceItem.place == self,
                           PlaceItem.done == true())
                   .order_by(PlaceItem.item_id))
    for place_item in place_items:
        place_item.done = False
    session.commit()
Example #16
Source File: sqlbase.py From AnyBlok with Mozilla Public License 2.0 | 5 votes |
def get_primary_keys(cls):
    """ return the name of the primary keys of the model

    :type: list of the primary keys name
    """
    C = cls.registry.System.Column
    query = C.query().distinct(C.name).options(load_only(C.name))
    query = query.filter(C.model.in_(cls.get_all_registry_names()))
    query = query.filter(C.primary_key == true())
    # NOTE(review): ``.all().name`` relies on AnyBlok's query results
    # proxying attribute access across all rows — confirm against the
    # framework's query-result documentation.
    return query.all().name
Example #17
Source File: oidc.py From rucio with Apache License 2.0 | 5 votes |
def refresh_jwt_tokens(total_workers, worker_number, refreshrate=3600, limit=1000, session=None):
    """
    Refreshes tokens which expired or will expire before (now + refreshrate)
    next run of this function and which have valid refresh token.

    :param total_workers: Number of total workers.
    :param worker_number: id of the executing worker.
    :param refreshrate: Look-ahead window in seconds; tokens expiring within
                        it are eligible for refresh.
    :param limit: Maximum number of tokens to refresh per call.
    :param session: Database session in use.

    :return: number of tokens refreshed
    """
    nrefreshed = 0
    try:
        # get tokens for refresh that expire in the next <refreshrate> seconds
        expiration_future = datetime.utcnow() + timedelta(seconds=refreshrate)
        query = session.query(models.Token.token) \
                       .filter(and_(models.Token.refresh == true(),
                                    models.Token.refresh_expired_at > datetime.utcnow(),
                                    models.Token.expired_at < expiration_future))\
                       .order_by(models.Token.expired_at)
        # Partition the candidate tokens across workers.
        query = filter_thread_work(session=session, query=query, total_threads=total_workers,
                                   thread_id=worker_number, hash_variable='token')

        # limiting the number of tokens for refresh
        filtered_tokens_query = query.limit(limit)
        filtered_tokens = []
        filtered_bunches = query_bunches(filtered_tokens_query, 10)
        for items in filtered_bunches:
            # Re-select with row locks (skipping rows locked by other workers).
            filtered_tokens += session.query(models.Token).filter(models.Token.token.in_(items)).with_for_update(skip_locked=True).all()

        # refreshing these tokens
        for token in filtered_tokens:
            new_token = __refresh_token_oidc(token, session=session)
            if new_token:
                nrefreshed += 1

    except Exception as error:
        raise RucioException(error.args)
    return nrefreshed
Example #18
Source File: request.py From rucio with Apache License 2.0 | 5 votes |
def get_heavy_load_rses(threshold, session=None):
    """
    Retrieve heavy load rses.

    :param threshold: Threshold as an int.
    :param session: Database session to use.
    :returns: List of {'rse_id', 'load'} dicts for RSEs at or above the
              threshold; None when no sources are in use at all.
    """
    try:
        results = session.query(models.Source.rse_id,
                                func.count(models.Source.rse_id).label('load'))\
                         .filter(models.Source.is_using == true())\
                         .group_by(models.Source.rse_id)\
                         .all()

        if not results:
            # NOTE(review): returns None here but a list below — callers
            # must handle both shapes.
            return

        result = []
        for t in results:
            if t[1] >= threshold:
                t2 = {'rse_id': t[0], 'load': t[1]}
                result.append(t2)
        return result
    except IntegrityError as error:
        raise RucioException(error.args)
Example #19
Source File: test_oauthmanager.py From rucio with Apache License 2.0 | 5 votes |
def new_tokens_ok(accountstring):
    # Test helper: check that every refresh token of the account whose
    # refresh_token string is embedded in its oidc_scope belongs to the
    # expected post-refresh set of token names.
    session = get_session()
    result = session.query(models.Token).filter_by(account=InternalAccount(accountstring), refresh=true()).all()  # pylint: disable=no-member
    token_names_expected = ["10_original_refreshed_and_deleted",
                            "11_to_be_kept_and_refreshed",
                            "14_original_refreshed_and_deleted",
                            "17_to_be_kept_and_refreshed"]
    selection = []
    for elem in result:
        if elem.refresh_token is not None:
            if elem.refresh_token in str(elem.oidc_scope):
                selection.append(elem.refresh_token)
    return all(item in token_names_expected for item in selection)
Example #20
Source File: test_oauthmanager.py From rucio with Apache License 2.0 | 5 votes |
def get_token_count_with_refresh_true(accountstring):
    # Test helper: count the account's tokens whose refresh flag is TRUE.
    session = get_session()
    result = session.query(models.Token.token).filter_by(account=InternalAccount(accountstring), refresh=true()).all()  # pylint: disable=no-member
    return len(result)
Example #21
Source File: helper.py From calibre-web with GNU General Public License v3.0 | 5 votes |
def tags_filters():
    # Combine the current user's allowed/denied tag lists into one filter:
    # an empty deny list blocks nothing (FALSE literal), an empty allow
    # list permits everything (TRUE literal).
    negtags_list = current_user.list_denied_tags()
    postags_list = current_user.list_allowed_tags()
    neg_content_tags_filter = false() if negtags_list == [''] else db.Tags.name.in_(negtags_list)
    pos_content_tags_filter = true() if postags_list == [''] else db.Tags.name.in_(postags_list)
    return and_(pos_content_tags_filter, ~neg_content_tags_filter)


# checks if domain is in database (including wildcards)
# example SELECT * FROM @TABLE WHERE 'abcdefg' LIKE Name;
# from https://code.luasoftware.com/tutorials/flask/execute-raw-sql-in-flask-sqlalchemy/
Example #22
Source File: db.py From calibre-web with GNU General Public License v3.0 | 5 votes |
def get_typeahead(self, database, query, replace=('', ''), tag_filter=true()):
    # Return a JSON list of names from *database* (a mapped table) that
    # match *query* case-insensitively, after applying *tag_filter* and an
    # optional string replacement on each name.
    query = query or ''
    # Register a Python lower() so case-insensitive matching also works
    # for non-ASCII characters in SQLite.
    self.session.connection().connection.connection.create_function("lower", 1, lcase)
    entries = self.session.query(database).filter(tag_filter). \
        filter(func.lower(database.name).ilike("%" + query + "%")).all()
    json_dumps = json.dumps([dict(name=r.name.replace(*replace)) for r in entries])
    return json_dumps
Example #23
Source File: db.py From calibre-web with GNU General Public License v3.0 | 5 votes |
def common_filters(self, allow_show_archived=False):
    # Compose the standard visibility filter applied to book queries:
    # archived state, language preference, allowed/denied tags, and
    # allowed/denied restricted-column values.
    if not allow_show_archived:
        # Hide books the current user archived.
        archived_books = (
            ub.session.query(ub.ArchivedBook)
            .filter(ub.ArchivedBook.user_id == int(current_user.id))
            .filter(ub.ArchivedBook.is_archived == True)
            .all()
        )
        archived_book_ids = [archived_book.book_id for archived_book in archived_books]
        archived_filter = Books.id.notin_(archived_book_ids)
    else:
        archived_filter = true()

    if current_user.filter_language() != "all":
        lang_filter = Books.languages.any(Languages.lang_code == current_user.filter_language())
    else:
        lang_filter = true()

    # Empty deny/allow lists collapse to FALSE/TRUE literals respectively.
    negtags_list = current_user.list_denied_tags()
    postags_list = current_user.list_allowed_tags()
    neg_content_tags_filter = false() if negtags_list == [''] else Books.tags.any(Tags.name.in_(negtags_list))
    pos_content_tags_filter = true() if postags_list == [''] else Books.tags.any(Tags.name.in_(postags_list))

    if self.config.config_restricted_column:
        # Same allow/deny pattern, applied to a configurable custom column.
        pos_cc_list = current_user.allowed_column_value.split(',')
        pos_content_cc_filter = true() if pos_cc_list == [''] else \
            getattr(Books, 'custom_column_' + str(self.config.config_restricted_column)). \
            any(cc_classes[self.config.config_restricted_column].value.in_(pos_cc_list))
        neg_cc_list = current_user.denied_column_value.split(',')
        neg_content_cc_filter = false() if neg_cc_list == [''] else \
            getattr(Books, 'custom_column_' + str(self.config.config_restricted_column)). \
            any(cc_classes[self.config.config_restricted_column].value.in_(neg_cc_list))
    else:
        pos_content_cc_filter = true()
        neg_content_cc_filter = false()

    return and_(lang_filter, pos_content_tags_filter, ~neg_content_tags_filter,
                pos_content_cc_filter, ~neg_content_cc_filter, archived_filter)


# Fill indexpage with all requested data from database
Example #24
Source File: web.py From calibre-web with GNU General Public License v3.0 | 5 votes |
def remote_login():
    """Create a remote-auth token and render the remote login page.

    A fresh ``RemoteAuthToken`` is persisted, and an absolute verification
    URL for it is rendered into the template.
    """
    auth_token = ub.RemoteAuthToken()
    ub.session.add(auth_token)
    ub.session.commit()
    # Fix: pass the Python boolean True, not SQLAlchemy's ``true`` construct,
    # to Flask's url_for(_external=...) flag. The construct happened to be
    # truthy, so the bug was silent.
    verify_url = url_for('web.verify_token', token=auth_token.auth_token, _external=True)
    log.debug(u"Remot Login request with token: %s", auth_token.auth_token)
    return render_title_template('remote_login.html',
                                 title=_(u"login"),
                                 token=auth_token.auth_token,
                                 verify_url=verify_url,
                                 page="remotelogin")
Example #25
Source File: session.py From backend.ai-manager with GNU Lesser General Public License v3.0 | 5 votes |
async def report_stats(app: web.Application) -> None:
    """Report gateway and user metrics to the configured stats monitor.

    Gathers coroutine/agent counts from the running app and kernel/keypair
    counts from the database, emitting each as a GAUGE metric.

    :param app: The aiohttp application holding the registry and DB pool.
    """
    # Fix: the body uses ``await``, ``async for`` and ``async with``, so this
    # must be declared ``async def`` — a plain ``def`` is a SyntaxError.
    stats_monitor = app['stats_monitor']
    await stats_monitor.report_metric(
        GAUGE, 'ai.backend.gateway.coroutines',
        len(asyncio.Task.all_tasks()))

    all_inst_ids = [
        inst_id
        async for inst_id in app['registry'].enumerate_instances()]
    await stats_monitor.report_metric(
        GAUGE, 'ai.backend.gateway.agent_instances', len(all_inst_ids))

    async with app['dbpool'].acquire() as conn, conn.begin():
        # Sum of concurrency_used approximates active kernels.
        query = (sa.select([sa.func.sum(keypairs.c.concurrency_used)])
                   .select_from(keypairs))
        n = await conn.scalar(query)
        await stats_monitor.report_metric(
            GAUGE, 'ai.backend.gateway.active_kernels', n)

        # Users that hold at least one active keypair.
        subquery = (sa.select([sa.func.count()])
                      .select_from(keypairs)
                      .where(keypairs.c.is_active == true())
                      .group_by(keypairs.c.user_id))
        query = sa.select([sa.func.count()]).select_from(subquery.alias())
        n = await conn.scalar(query)
        await stats_monitor.report_metric(
            GAUGE, 'ai.backend.users.has_active_key', n)

        # Of those, users whose key has actually been used.
        subquery = subquery.where(keypairs.c.last_used != null())
        query = sa.select([sa.func.count()]).select_from(subquery.alias())
        n = await conn.scalar(query)
        await stats_monitor.report_metric(
            GAUGE, 'ai.backend.users.has_used_key', n)

        '''
        query = sa.select([sa.func.count()]).select_from(usage)
        n = await conn.scalar(query)
        await stats_monitor.report_metric(
            GAUGE, 'ai.backend.gateway.accum_kernels', n)
        '''
Example #26
Source File: 9cd61b1ae70d_add_scheduable_field_to_agents.py From backend.ai-manager with GNU Lesser General Public License v3.0 | 5 votes |
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add a NOT NULL boolean 'schedulable' column to agents, defaulting to
    # TRUE both server-side (existing rows) and client-side.
    op.add_column('agents', sa.Column(
        'schedulable',
        sa.Boolean(),
        server_default=true(),
        default=True,
        nullable=False,
    ))
    # ### end Alembic commands ###
Example #27
Source File: experiment_server.py From Dallinger with MIT License | 4 votes |
def summary():
    """Summarize the participants' status codes."""
    exp = Experiment(session)
    state = {
        "status": "success",
        "summary": exp.log_summary(),
        "completed": exp.is_complete(),
    }

    # Networks that have not yet reached their maximum size.
    unfilled_nets = (
        models.Network.query.filter(models.Network.full != true())
        .with_entities(models.Network.id, models.Network.max_size)
        .all()
    )
    working = (
        models.Participant.query.filter_by(status="working")
        .with_entities(func.count(models.Participant.id))
        .scalar()
    )
    state["unfilled_networks"] = len(unfilled_nets)
    nodes_remaining = 0
    required_nodes = 0
    if state["unfilled_networks"] == 0:
        if working == 0 and state["completed"] is None:
            # All networks full and nobody working: the experiment is done.
            state["completed"] = True
    else:
        # Count non-failed nodes per unfilled network to estimate progress.
        for net in unfilled_nets:
            node_count = (
                models.Node.query.filter_by(network_id=net.id, failed=False)
                .with_entities(func.count(models.Node.id))
                .scalar()
            )
            net_size = net.max_size
            required_nodes += net_size
            nodes_remaining += net_size - node_count
    state["nodes_remaining"] = nodes_remaining
    state["required_nodes"] = required_nodes

    if state["completed"] is None:
        state["completed"] = False

    # Regenerate a waiting room message when checking status
    # to counter missed messages at the end of the waiting room
    nonfailed_count = models.Participant.query.filter(
        (models.Participant.status == "working")
        | (models.Participant.status == "overrecruited")
        | (models.Participant.status == "submitted")
        | (models.Participant.status == "approved")
    ).count()
    # NOTE(review): Experiment(session) is instantiated a second time here;
    # looks redundant with the instance created above — confirm before removing.
    exp = Experiment(session)
    overrecruited = exp.is_overrecruited(nonfailed_count)
    if exp.quorum:
        quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited}
        db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))

    return Response(dumps(state), status=200, mimetype="application/json")
Example #28
Source File: did.py From rucio with Apache License 2.0 | 4 votes |
def list_expired_dids(worker_number=None, total_workers=None, limit=None, session=None):
    """
    List expired data identifiers.

    :param worker_number: id of the executing worker.
    :param total_workers: Number of total workers.
    :param limit: limit number.
    :param session: The database session in use.
    """
    # Exclude DIDs that still have a locked replication rule attached.
    stmt = exists().where(and_(models.ReplicationRule.scope == models.DataIdentifier.scope,
                               models.ReplicationRule.name == models.DataIdentifier.name,
                               models.ReplicationRule.locked == true()))
    query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name,
                          models.DataIdentifier.did_type,
                          models.DataIdentifier.created_at,
                          models.DataIdentifier.purge_replicas).\
        filter(models.DataIdentifier.expired_at < datetime.utcnow(), not_(stmt)).\
        order_by(models.DataIdentifier.expired_at).\
        with_hint(models.DataIdentifier, "index(DIDS DIDS_EXPIRED_AT_IDX)", 'oracle')

    if session.bind.dialect.name in ['oracle', 'mysql', 'postgresql']:
        # These dialects support server-side partitioning of the work.
        query = filter_thread_work(session=session, query=query, total_threads=total_workers,
                                   thread_id=worker_number, hash_variable='name')
    elif session.bind.dialect.name == 'sqlite' and worker_number and total_workers and total_workers > 0:
        # sqlite fallback: partition client-side by hashing the DID name.
        row_count = 0
        dids = list()
        for scope, name, did_type, created_at, purge_replicas in query.yield_per(10):
            if int(md5(name).hexdigest(), 16) % total_workers == worker_number:
                dids.append({'scope': scope,
                             'name': name,
                             'did_type': did_type,
                             'created_at': created_at,
                             'purge_replicas': purge_replicas})
                row_count += 1
            if limit and row_count >= limit:
                return dids
        return dids
    else:
        if worker_number and total_workers:
            raise exception.DatabaseException('The database type %s returned by SQLAlchemy is invalid.' % session.bind.dialect.name)
        if limit:
            query = query.limit(limit)

    return [{'scope': scope,
             'name': name,
             'did_type': did_type,
             'created_at': created_at,
             'purge_replicas': purge_replicas}
            for scope, name, did_type, created_at, purge_replicas in query]
Example #29
Source File: replica.py From rucio with Apache License 2.0 | 4 votes |
def list_replicas(dids, schemes=None, unavailable=False, request_id=None,
                  ignore_availability=True, all_states=False, pfns=True,
                  rse_expression=None, client_location=None, domain=None,
                  sign_urls=False, signature_lifetime=None, resolve_archives=True,
                  resolve_parents=False, updated_after=None, session=None):
    """
    List file replicas for a list of data identifiers (DIDs).

    :param dids: The list of data identifiers (DIDs).
    :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
    :param unavailable: Also include unavailable replicas in the list.
    :param request_id: ID associated with the request for debugging.
    :param ignore_availability: Ignore the RSE blacklisting.
    :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
    :param pfns: Include PFNs in the returned replica information.
    :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs.
    :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site'}
    :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan']
    :param sign_urls: If set, will sign the PFNs if necessary.
    :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
    :param resolve_archives: When set to true, find archives which contain the replicas.
    :param resolve_parents: When set to true, find all parent datasets which contain the replicas.
    :param updated_after: datetime (UTC time), only return replicas updated after this time
    :param session: The database session in use.
    """
    # Resolve the input DIDs into the clauses and file/constituent sets
    # needed by the low-level _list_replicas query builder.
    file_clause, dataset_clause, state_clause, files, constituents = _resolve_dids(
        dids=dids, unavailable=unavailable, ignore_availability=ignore_availability,
        all_states=all_states, resolve_archives=resolve_archives, session=session)

    # Optionally restrict to the RSEs matching the given expression.
    rse_clause = []
    if rse_expression:
        for rse in parse_expression(expression=rse_expression, session=session):
            rse_clause.append(models.RSEFileAssociation.rse_id == rse['id'])

    # Generator: yield replica dictionaries one by one.
    for f in _list_replicas(dataset_clause, file_clause, state_clause, pfns,
                            schemes, files, rse_clause, rse_expression,
                            client_location, domain, sign_urls, signature_lifetime,
                            constituents, resolve_parents, updated_after, session):
        yield f
Example #30
Source File: registry.py From backend.ai-manager with GNU Lesser General Public License v3.0 | 4 votes |
async def get_kernel(
    self,
    kern_id: uuid.UUID,
    field=None,
    allow_stale: bool = False,
    db_connection=None,
):
    '''
    Retrieve the kernel information from the given kernel ID.
    This ID is unique for all individual agent-spawned containers.

    If ``field`` is given, it extracts only the raw value of the given
    field, without wrapping it as Kernel object.
    If ``allow_stale`` is true, it skips checking validity of the kernel
    owner instance.
    '''
    # Fix: the body uses ``async with`` and ``await``, so this must be
    # declared ``async def`` — a plain ``def`` is a SyntaxError here.
    cols = [kernels.c.id, kernels.c.sess_id,
            kernels.c.agent_addr, kernels.c.kernel_host, kernels.c.access_key]
    if field == '*':
        cols = [sa.text('*')]
    elif isinstance(field, (tuple, list)):
        cols.extend(field)
    elif isinstance(field, (sa.Column, sa.sql.elements.ColumnClause)):
        cols.append(field)
    elif isinstance(field, str):
        cols.append(sa.column(field))
    async with reenter_txn(self.dbpool, db_connection) as conn:
        if allow_stale:
            # Skip owner-agent liveness checks.
            query = (sa.select(cols)
                       .select_from(kernels)
                       .where(kernels.c.id == kern_id)
                       .limit(1).offset(0))
        else:
            # Only return kernels that are alive and whose agent is alive.
            query = (sa.select(cols)
                       .select_from(kernels.join(agents))
                       .where(
                           (kernels.c.id == kern_id) &
                           ~(kernels.c.status.in_(DEAD_KERNEL_STATUSES)) &
                           (agents.c.status == AgentStatus.ALIVE) &
                           (agents.c.id == kernels.c.agent)
                       )
                       .limit(1).offset(0))
        result = await conn.execute(query)
        row = await result.first()
        if row is None:
            raise SessionNotFound
        return row