Python sqlalchemy.orm.session.Session() Examples

The following are 30 code examples of sqlalchemy.orm.session.Session(). You can go to the original project or source file via the attribution line above each example. You may also want to check out all available functions and classes of the module sqlalchemy.orm.session.
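Before the examples, here is a minimal sketch of how a Session is typically created and used on its own. The in-memory SQLite URL and the names engine and SessionFactory are placeholders for illustration, not taken from any of the projects below.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session

# Placeholder engine; substitute your own database URL.
engine = create_engine('sqlite://')

# sessionmaker() builds a configured factory that produces Session objects.
SessionFactory = sessionmaker(bind=engine)

session: Session = SessionFactory()
try:
    # ... add, query, and modify ORM objects here ...
    session.commit()
except Exception:
    session.rollback()
    raise
finally:
    session.close()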
Example #1
Source File: xcom_endpoint.py    From airflow with Apache License 2.0
def get_xcom_entry(
    dag_id: str,
    task_id: str,
    dag_run_id: str,
    xcom_key: str,
    session: Session
) -> XComCollectionItemSchema:
    """
    Get an XCom entry
    """
    query = session.query(XCom)
    query = query.filter(and_(XCom.dag_id == dag_id,
                              XCom.task_id == task_id,
                              XCom.key == xcom_key))
    query = query.join(DR, and_(XCom.dag_id == DR.dag_id, XCom.execution_date == DR.execution_date))
    query = query.filter(DR.run_id == dag_run_id)

    query_object = query.one_or_none()
    if not query_object:
        raise NotFound("XCom entry not found")
    return xcom_collection_item_schema.dump(query_object) 
Example #2
Source File: game_test.py    From nekoyume with MIT License
def test_prevent_hack_and_slash_when_dead(
        fx_test_client: FlaskClient, fx_session: Session, fx_user: User,
        fx_private_key: PrivateKey, fx_novice_status: typing.Dict[str, str],
):
    move = fx_user.create_novice(fx_novice_status)
    Block.create(fx_user, [move])

    assert fx_user.avatar().dead is False
    while fx_user.avatar().hp > 0:
        move = fx_user.hack_and_slash()
        Block.create(fx_user, [move])
    assert fx_user.avatar().dead is True

    response = fx_test_client.post('/session_moves', data={
        'name': 'hack_and_slash'
    })
    assert response.status_code == 302 
Example #3
Source File: broadcast_test.py    From nekoyume with MIT License
def broadcast_node_failed(fx_session: scoped_session,
                          fx_other_session: Session, error):
    now = datetime.datetime.utcnow()
    node = Node(url='http://test.neko',
                last_connected_at=now)
    node2 = Node(url='http://other.neko',
                 last_connected_at=datetime.datetime.utcnow())
    fx_session.add(node)
    fx_session.commit()
    fx_other_session.add(node2)
    fx_other_session.commit()
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    with Mocker() as m:
        m.post('http://test.neko', exc=error)
        multicast(serialized={'url': node2.url},
                  broadcast=broadcast_node)
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    assert node.last_connected_at == now 
Example #4
Source File: test_biweeklypayperiod.py    From biweeklybudget with GNU Affero General Public License v3.0
def setup(self):
        self.mock_sess = Mock(spec_set=Session)
        self.cls = BiweeklyPayPeriod(date(2017, 3, 7), self.mock_sess)
        m_account = Mock(name='foo')
        type(m_account).name = 'foo'
        m_budget = Mock(name='bar')
        type(m_budget).name = 'bar'
        self.m_st = Mock(
            spec_set=ScheduledTransaction,
            id=123,
            description='desc',
            amount=Decimal('123.45'),
            account_id=2,
            account=m_account,
            budget_id=3,
            budget=m_budget
        ) 
Example #5
Source File: broadcast_test.py    From nekoyume with MIT License
def test_broadcast_move(
        fx_server: WSGIServer,
        fx_session: scoped_session,
        fx_other_server: WSGIServer,
        fx_other_session: Session,
        fx_user: User,
        fx_novice_status: typing.Mapping[str, str],
):
    now = datetime.datetime.utcnow()
    node = Node(url=fx_server.url,
                last_connected_at=now)
    node2 = Node(url=fx_other_server.url,
                 last_connected_at=datetime.datetime.utcnow())
    move = fx_user.create_novice(fx_novice_status)
    fx_session.add_all([node, node2, move])
    fx_session.commit()
    assert not fx_other_session.query(Move).get(move.id)
    serialized = move.serialize(
        use_bencode=False,
        include_signature=True,
        include_id=True,
    )
    multicast(serialized=serialized, broadcast=broadcast_move)
    assert fx_other_session.query(Move).get(move.id)
    assert node.last_connected_at > now 
Example #6
Source File: base.py    From marcotti with MIT License
def create_session(self):
        """
        Open transaction session with an active database object.
        
        If an error occurs during the session, roll back uncommitted changes
        and report error to log file and user.
        
        If session is no longer needed, commit remaining transactions before closing it.
        """
        session = Session(self.connection)
        logger.info("Create session {0} with {1}".format(
            id(session), self._public_db_uri(str(self.engine.url))))
        try:
            yield session
            session.commit()
            logger.info("Committing remaining transactions to database")
        except Exception as ex:
            session.rollback()
            logger.exception("Database transactions rolled back")
            raise ex
        finally:
            logger.info("Session {0} with {1} closed".format(
                id(session), self._public_db_uri(str(self.engine.url))))
            session.close() 
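Because create_session yields the session mid-way, it is presumably consumed as a context manager (for instance by decorating it with contextlib.contextmanager, which the excerpt does not show). A minimal usage sketch under that assumption, where db is a hypothetical instance of the surrounding class and SomeModel is a placeholder mapped class:

# Hypothetical caller; commit, rollback and close are handled inside create_session.
with db.create_session() as session:
    session.add(SomeModel(name='example'))
    row_count = session.query(SomeModel).count()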
Example #7
Source File: test_database_session.py    From nameko-sqlalchemy with Apache License 2.0
def test_multiple_workers(db_session):
    db_session.setup()

    worker_ctx_1 = Mock(spec=WorkerContext)
    session_1 = db_session.get_dependency(worker_ctx_1)
    assert isinstance(session_1, Session)
    assert db_session.sessions[worker_ctx_1] is session_1

    worker_ctx_2 = Mock(spec=WorkerContext)
    session_2 = db_session.get_dependency(worker_ctx_2)
    assert isinstance(session_2, Session)
    assert db_session.sessions[worker_ctx_2] is session_2

    assert db_session.sessions == WeakKeyDictionary({
        worker_ctx_1: session_1,
        worker_ctx_2: session_2
    }) 
Example #8
Source File: dagrun.py    From airflow with Apache License 2.0
def get_previous_dagrun(self, state: Optional[str] = None, session: Session = None) -> Optional['DagRun']:
        """The previous DagRun, if there is one"""

        session = cast(Session, session)  # mypy

        filters = [
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date < self.execution_date,
        ]
        if state is not None:
            filters.append(DagRun.state == state)
        return session.query(DagRun).filter(
            *filters
        ).order_by(
            DagRun.execution_date.desc()
        ).first() 
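In isolation, session: Session = None followed by the cast looks like it would fail; in Airflow such methods are wrapped by a provide_session decorator that injects a real session when the caller omits one (the exact import path varies by Airflow version). A simplified stand-in for that decorator, assuming a SessionFactory built with sessionmaker, shows the shape of the pattern:

import functools

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Hypothetical factory; Airflow builds its own from the configured database.
SessionFactory = sessionmaker(bind=create_engine('sqlite://'))

def provide_session(func):
    """Simplified stand-in: supply a session if the caller did not pass one."""
    @functools.wraps(func)
    def wrapper(*args, session=None, **kwargs):
        if session is not None:
            return func(*args, session=session, **kwargs)
        session = SessionFactory()
        try:
            result = func(*args, session=session, **kwargs)
            session.commit()
            return result
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()
    return wrapper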
Example #9
Source File: broadcast_test.py    From nekoyume with MIT License
def test_broadcast_node(
        fx_server: WSGIServer,
        fx_session: scoped_session,
        fx_other_server: WSGIServer,
        fx_other_session: Session,
):
    now = datetime.datetime.utcnow()
    node = Node(url=fx_server.url,
                last_connected_at=now)
    node2 = Node(url=fx_other_server.url,
                 last_connected_at=datetime.datetime.utcnow())
    fx_session.add(node)
    fx_session.commit()
    fx_other_session.add(node2)
    fx_other_session.commit()
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    multicast(serialized={'url': fx_other_server.url},
              broadcast=broadcast_node)
    assert fx_session.query(Node).filter(Node.url == node2.url).first()
    assert node.last_connected_at > now 
Example #10
Source File: dagrun.py    From airflow with Apache License 2.0
def _are_premature_tis(
        self,
        unfinished_tasks: List[TI],
        finished_tasks: List[TI],
        session: Session,
    ) -> bool:
        # There might be runnable tasks that are up for retry and, for some reason (retry delay, etc.),
        # are not ready yet, so we set the flags to count them in.
        for ut in unfinished_tasks:
            if ut.are_dependencies_met(
                dep_context=DepContext(
                    flag_upstream_failed=True,
                    ignore_in_retry_period=True,
                    ignore_in_reschedule_period=True,
                    finished_tasks=finished_tasks),
                    session=session):
                return True
        return False 
Example #11
Source File: dep_context.py    From airflow with Apache License 2.0
def ensure_finished_tasks(self, dag, execution_date: pendulum.DateTime, session: Session):
        """
        This method makes sure finished_tasks is populated if it's currently None.
        This is for the strange feature of running tasks without dag_run.

        :param dag: The DAG for which to find finished tasks
        :type dag: airflow.models.DAG
        :param execution_date: The execution_date to look for
        :param session: Database session to use
        :return: A list of all the finished tasks of this DAG and execution_date
        :rtype: list[airflow.models.TaskInstance]
        """
        if self.finished_tasks is None:
            self.finished_tasks = dag.get_task_instances(
                start_date=execution_date,
                end_date=execution_date,
                state=State.finished() + [State.UPSTREAM_FAILED],
                session=session,
            )
        return self.finished_tasks 
Example #12
Source File: node.py    From nekoyume with MIT License
def get(cls, url: str, session: Session = db.session):
        get_node = Node.query.filter_by(url=url).first
        node = get_node()
        if node:
            return node
        elif get(f'{url}/ping').text == 'pong':
            node = Node(url=url, last_connected_at=datetime.datetime.utcnow())
            if session:
                session.add(node)
                try:
                    session.commit()
                except IntegrityError:
                    node = get_node()
                    if node is None:
                        raise
                    return node
            return node
        else:
            return None 
Example #13
Source File: 2019_04_04_f972b83f1baa_tag_mode.py    From sticker-finder with MIT License
def upgrade():
    op.add_column('chat', sa.Column('tag_mode', sa.String(), nullable=True))
    session = Session(bind=op.get_bind())
    # Migrate the old boolean mode flags on Chat to the new tag_mode column
    session.query(Chat) \
        .filter(Chat.fix_single_sticker) \
        .update({'tag_mode': TagMode.SINGLE_STICKER})

    session.query(Chat) \
        .filter(Chat.tagging_random_sticker) \
        .update({'tag_mode': TagMode.RANDOM})

    session.query(Chat) \
        .filter(Chat.full_sticker_set) \
        .update({'tag_mode': TagMode.STICKER_SET})

    op.drop_index('ix_chat_current_sticker_set_name', table_name='chat')
    op.drop_constraint('chat_current_sticker_set_name_fkey', 'chat', type_='foreignkey')
    op.drop_column('chat', 'current_sticker_set_name')

    op.drop_constraint("only_one_action_check", "chat") 
Example #14
Source File: pool.py    From airflow with Apache License 2.0
def running_slots(self, session: Session):
        """
        Get the number of slots used by running tasks at the moment.

        :param session: SQLAlchemy ORM Session
        :return: the used number of slots
        """
        from airflow.models.taskinstance import TaskInstance  # Avoid circular import

        return (
            session
            .query(func.sum(TaskInstance.pool_slots))
            .filter(TaskInstance.pool == self.pool)
            .filter(TaskInstance.state == State.RUNNING)
            .scalar()
        ) or 0 
Example #15
Source File: dag.py    From airflow with Apache License 2.0
def get_paused_dag_ids(dag_ids: List[str], session: Session = None) -> Set[str]:
        """
        Given a list of dag_ids, get a set of Paused Dag Ids

        :param dag_ids: List of Dag ids
        :param session: ORM Session
        :return: Paused Dag_ids
        """
        paused_dag_ids = (
            session.query(DagModel.dag_id)
            .filter(DagModel.is_paused.is_(True))
            .filter(DagModel.dag_id.in_(dag_ids))
            .all()
        )

        paused_dag_ids = set(paused_dag_id for paused_dag_id, in paused_dag_ids)
        return paused_dag_ids 
Example #16
Source File: user.py    From hydrus with MIT License
def add_token(request: LocalProxy, session: Session) -> str:
    """
    Create a new token for the user or return a
    valid existing token to the user.
    """
    token = None
    id_ = int(request.authorization['username'])
    try:
        token = session.query(Token).filter(Token.user_id == id_).one()
        if not token.is_valid():
            update_token = '%030x' % randrange(16**30)
            token.id = update_token
            token.timestamp = datetime.now()
            session.commit()
    except NoResultFound:
        token = '%030x' % randrange(16**30)
        new_token = Token(user_id=id_, id=token)
        session.add(new_token)
        session.commit()
        return token
    return token.id 
Example #17
Source File: user.py    From hydrus with MIT License
def add_user(id_: int, paraphrase: str, session: Session) -> None:
    """Add new users to the database.

    Raises:
        UserExists: If a user with `id_` already exists.

    """
    if session.query(exists().where(User.id == id_)).scalar():
        raise UserExists(id_=id_)
    else:
        new_user = User(id=id_, paraphrase=sha224(
            paraphrase.encode('utf-8')).hexdigest())
        session.add(new_user)
        session.commit() 
Example #18
Source File: 2019_04_15_35223866defb_fix_tag_schema.py    From sticker-finder with MIT License
def upgrade():
    """Fix wrong constraints."""
    # Drop all constraints first
    op.drop_constraint('change_added_tags_tag_name_fkey', 'change_added_tags', type_='foreignkey')
    op.drop_constraint('change_removed_tags_tag_name_fkey', 'change_removed_tags', type_='foreignkey')
    op.drop_constraint('sticker_tag_tag_name_fkey', 'sticker_tag', type_='foreignkey')
    op.drop_column('sticker_tag', 'tag_is_default_language')

    op.drop_constraint('tag_pkey', 'tag')

    # Remove all tags that exist in both languages.
    session = Session(bind=op.get_bind())
    duplicate_tags = session.query(Tag.name) \
        .group_by(Tag.name) \
        .having(func.count(Tag.name) > 1) \
        .all()

    duplicate_names = [tag[0] for tag in duplicate_tags]

    session.query(Tag) \
        .filter(Tag.is_default_language.is_(False)) \
        .filter(Tag.name.in_(duplicate_names)) \
        .delete(synchronize_session='fetch')

    # Recreate tag.name pkey
    op.create_primary_key('tag_pkey', 'tag', ['name'])

    # Create other foreign keys
    op.create_foreign_key(
        'change_added_tags_tag_name_fkey', 'change_added_tags', 'tag',
        ['tag_name'], ['name'],
        onupdate='cascade', ondelete='cascade', deferrable=True)
    op.create_foreign_key(
        'change_removed_tags_tag_name_fkey', 'change_removed_tags', 'tag',
        ['tag_name'], ['name'],
        onupdate='cascade', ondelete='cascade', deferrable=True)
    op.create_foreign_key(
        'sticker_tag_tag_name_fkey', 'sticker_tag', 'tag',
        ['tag_name'], ['name'],
        onupdate='cascade', ondelete='cascade', deferrable=True) 
Example #19
Source File: config.py    From n6 with GNU Affero General Public License v3.0
def commit_or_rollback(self, session, exc_type, _exc_value, _tb):
        assert isinstance(session, Session)
        if exc_type is None:
            with self.commit_wrapper(session):
                session.commit()
        else:
            session.rollback() 
Example #20
Source File: config.py    From n6 with GNU Affero General Public License v3.0
def make_nested_savepoint(self):
        session = self.get_current_session()
        assert isinstance(session, Session)
        return session.begin_nested() 
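begin_nested() opens a SAVEPOINT, so work done inside it can be rolled back without discarding the enclosing transaction. A minimal sketch of the underlying SQLAlchemy pattern, independent of the n6 wrappers, where session is any ORM Session and SomeModel is a placeholder mapped class:

savepoint = session.begin_nested()      # SAVEPOINT
try:
    session.add(SomeModel(name='inner'))
    savepoint.commit()                  # RELEASE SAVEPOINT
except Exception:
    savepoint.rollback()                # ROLLBACK TO SAVEPOINT; outer work is preserved
session.commit()                        # commit the enclosing transaction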
Example #21
Source File: 2019_04_13_888b710775ea_move_change_tags_to_new_format.py    From sticker-finder with MIT License
def upgrade():
    """Actually change the change sets to the new format."""
    session = Session(bind=op.get_bind())
    changes = session.query(Change) \
        .order_by(Change.created_at.desc()) \
        .all()

    for change in changes:
        old_tags = set(get_tags_from_text(change.old_tags))
        new_tags = set(get_tags_from_text(change.new_tags))

        added_tags = list(new_tags - old_tags)
        removed_tags = list(old_tags - new_tags)

        added_tags = session.query(Tag) \
            .filter(Tag.name.in_(added_tags)) \
            .all()

        removed_tags = session.query(Tag) \
            .filter(or_(
                Tag.is_default_language.is_(change.is_default_language),
                Tag.emoji
            )) \
            .filter(Tag.name.in_(removed_tags)) \
            .all()

        change.removed_tags = removed_tags
        change.added_tags = added_tags

    session.commit() 
Example #22
Source File: db_manager.py    From soweego with GNU General Public License v3.0
def new_session(self) -> session.Session:
        """Create a new DB session"""
        Session = sessionmaker(bind=self.__engine)
        return Session() 
Example #23
Source File: 2019_03_20_b17dcce8c3f3_sticker_usages.py    From sticker-finder with MIT License
def upgrade():
    op.create_table(
        'sticker_usage',
        sa.Column('sticker_file_id', sa.String(), nullable=False),
        sa.Column('user_id', sa.BigInteger(), nullable=False),
        sa.Column('usage_count', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
        sa.ForeignKeyConstraint(['sticker_file_id'], ['sticker.file_id'], onupdate='cascade', ondelete='cascade', deferrable=True),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], onupdate='cascade', ondelete='cascade', deferrable=True),
        sa.PrimaryKeyConstraint('sticker_file_id', 'user_id'),
    )
    op.create_index(op.f('ix_sticker_usage_sticker_file_id'), 'sticker_usage', ['sticker_file_id'], unique=False)
    op.create_index(op.f('ix_sticker_usage_user_id'), 'sticker_usage', ['user_id'], unique=False)

    session = Session(bind=op.get_bind())
    usages = session.query(User, func.count(InlineQuery.sticker_file_id)) \
        .join(InlineQuery) \
        .join(Sticker) \
        .add_entity(Sticker) \
        .filter(InlineQuery.sticker_file_id.isnot(None)) \
        .group_by(User, Sticker) \
        .all()

    for usage in usages:
        user = usage[0]
        count = usage[1]
        sticker = usage[2]
        sticker_usage = StickerUsage(user, sticker)
        sticker_usage.usage_count = count
        session.add(sticker_usage)

    session.commit() 
Example #24
Source File: config.py    From n6 with GNU Affero General Public License v3.0
def finalize_session(self, session, exc_type, exc_value, tb):
        assert isinstance(session, Session)
        try:
            self.commit_or_rollback(session, exc_type, exc_value, tb)
        finally:
            session.close() 
Example #25
Source File: config.py    From n6 with GNU Affero General Public License v3.0
def finalize_nested_savepoint(self, _savepoint, exc_type, exc_value, tb):
        session = self.get_current_session()
        assert isinstance(session, Session)
        self.commit_or_rollback(session, exc_type, exc_value, tb) 
Example #26
Source File: block_test.py    From nekoyume with MIT License
def test_sync_node_unavailable_on_get_blocks(
        fx_user: User, fx_session: scoped_session,
        fx_server: WSGIServer, fx_other_session: Session,
        fx_novice_status: typing.Mapping[str, str],
        code: int
):
    move = fx_user.create_novice(fx_novice_status)
    block = Block.create(fx_user, [move])
    Block.sync(Node(url=fx_server.url), fx_other_session)
    serialized = block.serialize(
        use_bencode=False,
        include_suffix=True,
        include_moves=True,
        include_hash=True
    )
    serialized['id'] = block.id + 1
    with Mocker() as m:
        m.register_uri(
            'GET', f'{fx_server.url}/blocks/last',
            json={'block': serialized},
            status_code=200,
        )
        m.register_uri(
            'GET', f'{fx_server.url}/blocks/1',
            json={'block': serialized},
            status_code=200,
        )
        m.get(url=f'{fx_server.url}/blocks', status_code=code)
        Block.sync(Node(url=fx_server.url), fx_other_session)
        assert not fx_other_session.query(Block).get(serialized['id']) 
Example #27
Source File: backfill_job.py    From airflow with Apache License 2.0
def _task_instances_for_dag_run(self, dag_run, session=None):
        """
        Returns a map of task instance key to task instance object for the tasks to
        run in the given dag run.

        :param dag_run: the dag run to get the tasks from
        :type dag_run: airflow.models.DagRun
        :param session: the database session object
        :type session: sqlalchemy.orm.session.Session
        """
        tasks_to_run = {}

        if dag_run is None:
            return tasks_to_run

        # check if we have orphaned tasks
        self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)

        # For some reason, if we don't refresh it, the reference to the dag run is lost
        dag_run.refresh_from_db()
        make_transient(dag_run)

        try:
            for ti in dag_run.get_task_instances():
                # all tasks part of the backfill are scheduled to run
                if ti.state == State.NONE:
                    ti.set_state(State.SCHEDULED, session=session, commit=False)
                if ti.state != State.REMOVED:
                    tasks_to_run[ti.key] = ti
            session.commit()
        except Exception:
            session.rollback()
            raise

        return tasks_to_run 
Example #28
Source File: subdag_operator.py    From airflow with Apache License 2.0
def __init__(self,
                 subdag: DAG,
                 session: Optional[Session] = None,
                 propagate_skipped_state: Optional[SkippedStatePropagationOptions] = None,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.subdag = subdag
        self.propagate_skipped_state = propagate_skipped_state

        self._validate_dag(kwargs)
        self._validate_pool(session) 
Example #29
Source File: pool.py    From airflow with Apache License 2.0
def open_slots(self, session: Session):
        """
        Get the number of slots open at the moment.

        :param session: SQLAlchemy ORM Session
        :return: the number of slots
        """
        if self.slots == -1:
            return float('inf')
        else:
            return self.slots - self.occupied_slots(session) 
Example #30
Source File: pool.py    From airflow with Apache License 2.0
def occupied_slots(self, session: Session):
        """
        Get the number of slots used by running/queued tasks at the moment.

        :param session: SQLAlchemy ORM Session
        :return: the used number of slots
        """
        from airflow.models.taskinstance import TaskInstance  # Avoid circular import
        return (
            session
            .query(func.sum(TaskInstance.pool_slots))
            .filter(TaskInstance.pool == self.pool)
            .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))
            .scalar()
        ) or 0