Python asyncio.gather() Examples

The following are 30 code examples of asyncio.gather(), collected from open-source projects; the source file, project, and license for each example are noted above its code.
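
Before the project code below, here is a minimal, self-contained sketch of the basic pattern these examples build on; the coroutine names and delays are illustrative only:

import asyncio

async def fetch(name, delay):
    # Stand-in for some I/O-bound work.
    await asyncio.sleep(delay)
    return name

async def main():
    # gather() runs the awaitables concurrently and returns their results
    # in the order the awaitables were passed in.
    results = await asyncio.gather(fetch("a", 0.2), fetch("b", 0.1))
    print(results)  # ['a', 'b']

asyncio.run(main())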
Example #1
Source File: test_asyncio.py    From opentracing-python with Apache License 2.0
def test_main(self):
        # Need to run within a Task, as the scope manager depends
        # on Task.current_task()
        async def main_task():
            with self.tracer.start_active_span('parent'):
                tasks = self.submit_callbacks()
                await asyncio.gather(*tasks)

        self.loop.create_task(main_task())

        stop_loop_when(self.loop,
                       lambda: len(self.tracer.finished_spans()) >= 4)
        self.loop.run_forever()

        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 4)
        self.assertNamesEqual(spans, ['task', 'task', 'task', 'parent'])

        for i in range(3):
            self.assertSameTrace(spans[i], spans[-1])
            self.assertIsChildOf(spans[i], spans[-1]) 
Example #2
Source File: shell_tools.py    From OpenFermion-Cirq with Apache License 2.0
async def _async_wait_for_process(
        future_process: Any,
        out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout,
        err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr
) -> CommandOutput:
    """Awaits the creation and completion of an asynchronous process.

    Args:
        future_process: The eventually created process.
        out: Where to write stuff emitted by the process' stdout.
        err: Where to write stuff emitted by the process' stderr.

    Returns:
        A (captured output, captured error output, return code) triplet.
    """
    process = await future_process
    future_output = _async_forward(process.stdout, out)
    future_err_output = _async_forward(process.stderr, err)
    output, err_output = await asyncio.gather(future_output, future_err_output)
    await process.wait()

    return CommandOutput(output, err_output, process.returncode) 
Example #3
Source File: backend.py    From friendly-telegram with GNU Affero General Public License v3.0
async def _do_ops(self, ops):
        try:
            for r in await asyncio.gather(*ops, return_exceptions=True):
                if isinstance(r, MessageNotModifiedError):
                    logging.debug("db not modified", exc_info=r)
                elif isinstance(r, Exception):
                    raise r  # Makes more sense to raise even for MessageEditTimeExpiredError
                elif not isinstance(r, Message):
                    logging.debug("unknown ret from gather, %r", r)
        except MessageEditTimeExpiredError:
            logging.debug("Making new channel.")
            _db = self.db
            self.db = None
            await self._client(DeleteChannelRequest(channel=_db))
            return True
        return False 
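
Example #3 relies on return_exceptions=True: instead of re-raising the first failure, gather() puts exception objects into the result list so each one can be inspected, which is what the isinstance checks above do. A minimal sketch of that behaviour, with illustrative coroutine names:

import asyncio

async def ok():
    return "done"

async def boom():
    raise ValueError("failed")

async def main():
    # With return_exceptions=True the ValueError is returned, not raised.
    results = await asyncio.gather(ok(), boom(), return_exceptions=True)
    for result in results:
        if isinstance(result, Exception):
            print("caught:", result)
        else:
            print("result:", result)

asyncio.run(main())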
Example #4
Source File: app.py    From quart with MIT License
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
    tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
    if not tasks:
        return

    for task in tasks:
        task.cancel()
    loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))

    for task in tasks:
        if not task.cancelled() and task.exception() is not None:
            loop.call_exception_handler(
                {
                    "message": "unhandled exception during shutdown",
                    "exception": task.exception(),
                    "task": task,
                }
            ) 
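
Note that the loop= argument passed to asyncio.gather() in Example #4 was deprecated in Python 3.8 and removed in Python 3.10. A sketch of the same shutdown helper without it, assuming Python 3.7+ where asyncio.all_tasks() is available and gather() picks the loop up from the tasks themselves:

import asyncio

def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
    # Same idea as Example #4, minus the removed loop= argument.
    tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
    if not tasks:
        return
    for task in tasks:
        task.cancel()
    loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))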
Example #5
Source File: transaction.py    From aioredis with MIT License
def pipeline(self):
        """Returns :class:`Pipeline` object to execute bulk of commands.

        It is provided for convenience.
        Commands can be pipelined without it.

        Example:

        >>> pipe = redis.pipeline()
        >>> fut1 = pipe.incr('foo') # NO `await` as it will block forever!
        >>> fut2 = pipe.incr('bar')
        >>> result = await pipe.execute()
        >>> result
        [1, 1]
        >>> await asyncio.gather(fut1, fut2)
        [1, 1]
        >>> #
        >>> # The same can be done without pipeline:
        >>> #
        >>> fut1 = redis.incr('foo')    # the 'INCR foo' command is already sent
        >>> fut2 = redis.incr('bar')
        >>> await asyncio.gather(fut1, fut2)
        [2, 2]
        """
        return Pipeline(self._pool_or_conn, self.__class__) 
Example #6
Source File: unix_ls_on_device_async.py    From moler with BSD 3-Clause "New" or "Revised" License
def _cleanup_remaining_tasks(loop, logger):
    # https://stackoverflow.com/questions/30765606/whats-the-correct-way-to-clean-up-after-an-interrupted-event-loop
    # https://medium.com/python-pandemonium/asyncio-coroutine-patterns-beyond-await-a6121486656f
    # Handle shutdown gracefully by waiting for all tasks to be cancelled
    not_done_tasks = [task for task in asyncio.Task.all_tasks(loop=loop) if not task.done()]
    if not_done_tasks:
        logger.info("cancelling all remaining tasks")
        # NOTE: following code cancels all tasks - possibly not ours as well
        remaining_tasks = asyncio.gather(*not_done_tasks, loop=loop, return_exceptions=True)
        remaining_tasks.add_done_callback(lambda t: loop.stop())
        logger.debug("remaining tasks = {}".format(not_done_tasks))
        remaining_tasks.cancel()

        # Keep the event loop running until it is either destroyed or all
        # tasks have really terminated
        loop.run_until_complete(remaining_tasks) 
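
Examples #6 and #7 depend on the fact that cancelling the aggregate future returned by gather() also cancels every child task that is still pending, while return_exceptions=True keeps per-task exceptions from cutting the wait short. A small sketch of that behaviour with illustrative coroutine names and delays (the loop= argument used above has since been removed from gather()):

import asyncio

async def worker(delay):
    await asyncio.sleep(delay)

async def main():
    tasks = [asyncio.ensure_future(worker(d)) for d in (10, 20)]
    # Wrap the pending tasks in one aggregate future, as Examples #6 and #7 do.
    remaining = asyncio.gather(*tasks, return_exceptions=True)
    # Cancelling the aggregate future cancels every still-pending child.
    remaining.cancel()
    try:
        await remaining
    except asyncio.CancelledError:
        pass  # the aggregate future itself reports the cancellation
    print([task.cancelled() for task in tasks])  # [True, True]

asyncio.run(main())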
Example #7
Source File: asyncio_common.py    From moler with BSD 3-Clause "New" or "Revised" License
def _cleanup_remaining_tasks(loop, logger):
    # https://stackoverflow.com/questions/30765606/whats-the-correct-way-to-clean-up-after-an-interrupted-event-loop
    # https://medium.com/python-pandemonium/asyncio-coroutine-patterns-beyond-await-a6121486656f
    # Handle shutdown gracefully by waiting for all tasks to be cancelled
    not_done_tasks = [task for task in asyncio.Task.all_tasks(loop=loop) if not task.done()]
    if not_done_tasks:
        logger.info("cancelling all remaining tasks")
        # NOTE: following code cancels all tasks - possibly not ours as well
        remaining_tasks = asyncio.gather(*not_done_tasks, loop=loop, return_exceptions=True)
        remaining_tasks.add_done_callback(lambda t: loop.stop())
        logger.debug("remaining tasks = {}".format(not_done_tasks))
        remaining_tasks.cancel()

        # Keep the event loop running until it is either destroyed or all
        # tasks have really terminated
        loop.run_until_complete(remaining_tasks) 
Example #8
Source File: loop.py    From query-exporter with GNU General Public License v3.0
async def stop(self):
        """Stop timed query execution."""
        coros = (call.stop() for call in self._timed_calls.values())
        await asyncio.gather(*coros, return_exceptions=True)
        self._timed_calls.clear()
        coros = (db.close() for db in self._databases)
        await asyncio.gather(*coros, return_exceptions=True) 
Example #9
Source File: test_contextvars.py    From opentracing-python with Apache License 2.0
def test_main(self):
        # Need to run within a Task, as the scope manager depends
        # on Task.current_task()
        async def main_task():
            with self.tracer.start_active_span('parent'):
                tasks = self.submit_callbacks()
                await asyncio.gather(*tasks)

        self.loop.create_task(main_task())

        stop_loop_when(self.loop,
                       lambda: len(self.tracer.finished_spans()) >= 4)
        self.loop.run_forever()

        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 4)
        self.assertNamesEqual(spans, ['task', 'task', 'task', 'parent'])

        for i in range(3):
            self.assertSameTrace(spans[i], spans[-1])
            self.assertIsChildOf(spans[i], spans[-1]) 
Example #10
Source File: test_local.py    From quart with MIT License
async def test_task_local() -> None:
    local_ = TaskLocal()
    queue: asyncio.Queue = asyncio.Queue()
    tasks = 2
    for _ in range(tasks):
        queue.put_nowait(None)

    async def _test_local(value: int) -> int:
        local_.test = value
        await queue.get()
        queue.task_done()
        await queue.join()
        return local_.test

    futures = [asyncio.ensure_future(_test_local(value)) for value in range(tasks)]
    asyncio.gather(*futures)
    for value, future in enumerate(futures):
        assert (await future) == value 
Example #11
Source File: list_commands_test.py    From aioredis with MIT License
async def test_brpoplpush_blocking_features(redis, create_redis, server):
    source = b'key:brpoplpush:12'
    value = b'brpoplpush:value:2'
    destkey = b'destkey:brpoplpush:2'
    other_redis = await create_redis(
        server.tcp_address)
    # create blocking task
    consumer_task = other_redis.brpoplpush(source, destkey)
    producer_task = asyncio.ensure_future(
        push_data_with_sleep(redis, source, value))
    results = await asyncio.gather(consumer_task, producer_task)
    assert results[0] == value
    assert results[1] == 1

    # make sure that all values stored in new destkey list
    test_value = await redis.lrange(destkey, 0, -1)
    assert test_value == [value]

    # wait for data with timeout, list is empty, so brpoplpush should
    # return None in 1 sec
    waiter = redis.brpoplpush(source, destkey, timeout=1)
    test_value = await waiter
    assert test_value is None
    other_redis.close() 
Example #12
Source File: pubsub_commands_test.py    From aioredis with MIT License
async def test_subscribe_concurrency(create_redis, server):
    sub = await create_redis(server.tcp_address)
    pub = await create_redis(server.tcp_address)

    async def subscribe(*args):
        return await sub.subscribe(*args)

    async def publish(*args):
        await asyncio.sleep(0)
        return await pub.publish(*args)

    res = await asyncio.gather(
        subscribe('channel:0'),
        publish('channel:0', 'Hello'),
        subscribe('channel:1'),
        )
    (ch1,), subs, (ch2,) = res

    assert ch1.name == b'channel:0'
    assert subs == 1
    assert ch2.name == b'channel:1' 
Example #13
Source File: import_ghcn_file.py    From hsds with Apache License 2.0
def import_file(filename):
    log.info("import_file: {}".format(filename))
    loop = globals["loop"]
    max_concurrent_tasks = config.get("max_concurrent_tasks")
    tasks = []
    with open(filename, 'r') as fh:
        for line in fh:
            line = line.rstrip()
            #loop.run_until_complete(import_line(line))
            tasks.append(asyncio.ensure_future(import_line(line)))
            if len(tasks) < max_concurrent_tasks:
                continue  # get next line
            # got a batch, move them out!
            loop.run_until_complete(asyncio.gather(*tasks))
            tasks = []
    # finish any remaining tasks
    loop.run_until_complete(asyncio.gather(*tasks))
    globals["files_read"] += 1 
Example #14
Source File: pool_test.py    From aioredis with MIT License
async def test_pool_size_growth(create_pool, server):
    pool = await create_pool(
        server.tcp_address,
        minsize=1, maxsize=1)

    done = set()
    tasks = []

    async def task1(i):
        with (await pool):
            assert pool.size <= pool.maxsize
            assert pool.freesize == 0
            await asyncio.sleep(0.2)
            done.add(i)

    async def task2():
        with (await pool):
            assert pool.size <= pool.maxsize
            assert pool.freesize >= 0
            assert done == {0, 1}

    for _ in range(2):
        tasks.append(asyncio.ensure_future(task1(_)))
    tasks.append(asyncio.ensure_future(task2()))
    await asyncio.gather(*tasks) 
Example #15
Source File: pool.py    From aioredis with MIT License
def execute_pubsub(self, command, *channels):
        """Executes Redis (p)subscribe/(p)unsubscribe commands.

        ConnectionsPool picks separate connection for pub/sub
        and uses it until explicitly closed or disconnected
        (unsubscribing from all channels/patterns will leave connection
         locked for pub/sub use).

        There is no auto-reconnect for this PUB/SUB connection.

        Returns asyncio.gather coroutine waiting for all channels/patterns
        to receive answers.
        """
        conn, address = self.get_connection(command)
        if conn is not None:
            return conn.execute_pubsub(command, *channels)
        else:
            return self._wait_execute_pubsub(address, command, channels, {}) 
Example #16
Source File: core.py    From stoq with Apache License 2.0
async def _get_dispatches(
        self, payload: Payload, request: Request
    ) -> Tuple[Payload, Set[str]]:
        # Run all dispatchers to form our initial set of worker plugins to run
        worker_plugins: Set[str] = set(
            self.always_dispatch
        ) if self.always_dispatch else set()
        dispatch_results: List[Set[str]] = await asyncio.gather(  # type: ignore
            *[
                self._apply_dispatcher(dispatcher, payload, request)
                for dispatcher in self._loaded_dispatcher_plugins.values()
            ]
        )
        for dispatch_result in dispatch_results:
            worker_plugins.update(dispatch_result)
        return payload, worker_plugins 
Example #17
Source File: pool.py    From aioredis with MIT License
async def _do_close(self):
        # TODO: lock
        tasks = []
        task, self._monitor_task = self._monitor_task, None
        task.cancel()
        tasks.append(task)
        while self._pools:
            pool = self._pools.pop(0)
            pool.close()
            tasks.append(pool.wait_closed())
        while self._masters:
            _, pool = self._masters.popitem()
            pool.close()
            tasks.append(pool.wait_closed())
        while self._slaves:
            _, pool = self._slaves.popitem()
            pool.close()
            tasks.append(pool.wait_closed())
        await asyncio.gather(*tasks) 
Example #18
Source File: list_commands_test.py    From aioredis with MIT License
async def test_blpop_blocking_features(redis, create_redis, server):
    key1, key2 = b'key:blpop:1', b'key:blpop:2'
    value = b'blpop:value:2'

    other_redis = await create_redis(server.tcp_address)

    # create blocking task in separate connection
    consumer = other_redis.blpop(key1, key2)

    producer_task = asyncio.ensure_future(
        push_data_with_sleep(redis, key2, value))
    results = await asyncio.gather(consumer, producer_task)

    assert results[0] == [key2, value]
    assert results[1] == 1

    # wait for data with timeout, list is empty, so blpop should
    # return None in 1 sec
    waiter = redis.blpop(key1, key2, timeout=1)
    test_value = await waiter
    assert test_value is None
    other_redis.close() 
Example #19
Source File: etcd.py    From backend.ai-manager with GNU Lesser General Public License v3.0
async def list_images(self) -> Sequence[Mapping[str, Any]]:
        known_registries = await get_known_registries(self.etcd)
        reverse_aliases = await self._scan_reverse_aliases()
        data = await self.etcd.get_prefix('images')
        coros = []
        for registry, images in data.items():
            if registry == '_aliases':
                continue
            for image, tags in images.items():
                if image == '':
                    continue
                if tags == '1':
                    continue
                for tag, image_info in tags.items():
                    if tag == '':
                        continue
                    raw_ref = f'{etcd_unquote(registry)}/{etcd_unquote(image)}:{tag}'
                    ref = ImageRef(raw_ref, known_registries)
                    coros.append(self._parse_image(ref, image_info, reverse_aliases))
        result = await asyncio.gather(*coros)
        return result 
Example #20
Source File: pool_pubsub.py    From aioredis with MIT License
def main():
    loop = asyncio.get_event_loop()
    tsk = asyncio.ensure_future(pubsub(), loop=loop)

    async def publish():
        pub = await aioredis.create_redis(
            'redis://localhost')
        while not tsk.done():
            # wait for clients to subscribe
            while True:
                subs = await pub.pubsub_numsub('channel:1')
                if subs[b'channel:1'] == 1:
                    break
                await asyncio.sleep(0, loop=loop)
            # publish some messages
            for msg in ['one', 'two', 'three']:
                await pub.publish('channel:1', msg)
            # send stop word
            await pub.publish('channel:1', STOPWORD)
        pub.close()
        await pub.wait_closed()

    loop.run_until_complete(asyncio.gather(publish(), tsk, loop=loop)) 
Example #21
Source File: test_websocket_exceptions.py    From gql with MIT License
async def test_websocket_non_regression_bug_105(event_loop, server):

    # This test will check a fix to a race condition which happens if the user is trying
    # to connect using the same client twice at the same time
    # See bug #105

    url = f"ws://{server.hostname}:{server.port}/graphql"
    print(f"url = {url}")

    sample_transport = WebsocketsTransport(url=url)

    client = Client(transport=sample_transport)

    # Create a coroutine which start the connection with the transport but does nothing
    async def client_connect(client):
        async with client:
            await asyncio.sleep(2 * MS)

    # Create two tasks which will try to connect using the same client (not allowed)
    connect_task1 = asyncio.ensure_future(client_connect(client))
    connect_task2 = asyncio.ensure_future(client_connect(client))

    with pytest.raises(TransportAlreadyConnected):
        await asyncio.gather(connect_task1, connect_task2) 
Example #22
Source File: stream.py    From backend.ai-manager with GNU Lesser General Public License v3.0
async def stream_app_ctx(app: web.Application) -> AsyncIterator[None]:
    app['stream_pty_handlers'] = defaultdict(weakref.WeakSet)
    app['stream_execute_handlers'] = defaultdict(weakref.WeakSet)
    app['stream_proxy_handlers'] = defaultdict(weakref.WeakSet)
    app['stream_stdin_socks'] = defaultdict(weakref.WeakSet)
    app['zctx'] = zmq.asyncio.Context()

    event_dispatcher = app['event_dispatcher']
    event_dispatcher.subscribe('kernel_terminated', app, kernel_terminated)

    yield

    cancelled_tasks: List[asyncio.Task] = []
    for per_kernel_handlers in app['stream_pty_handlers'].values():
        for handler in list(per_kernel_handlers):
            if not handler.done():
                handler.cancel()
                cancelled_tasks.append(handler)
    for per_kernel_handlers in app['stream_execute_handlers'].values():
        for handler in list(per_kernel_handlers):
            if not handler.done():
                handler.cancel()
                cancelled_tasks.append(handler)
    for per_kernel_handlers in app['stream_proxy_handlers'].values():
        for handler in list(per_kernel_handlers):
            if not handler.done():
                handler.cancel()
                cancelled_tasks.append(handler)
    await asyncio.gather(*cancelled_tasks, return_exceptions=True)
    app['zctx'].term() 
Example #23
Source File: doc.py    From bot with MIT License
async def refresh_inventory(self) -> None:
        """Refresh internal documentation inventory."""
        log.debug("Refreshing documentation inventory...")

        # Clear the old base URLS and inventories to ensure
        # that we start from a fresh local dataset.
        # Also, reset the cache used for fetching documentation.
        self.base_urls.clear()
        self.inventories.clear()
        self.renamed_symbols.clear()
        async_cache.cache = OrderedDict()

        # Run all coroutines concurrently - since each of them performs a HTTP
        # request, this speeds up fetching the inventory data heavily.
        coros = [
            self.update_single(
                package["package"], package["base_url"], package["inventory_url"]
            ) for package in await self.bot.api_client.get('bot/documentation-links')
        ]
        await asyncio.gather(*coros) 
Example #24
Source File: transaction.py    From aioredis with MIT License
async def _do_execute(self, conn, *, return_exceptions=False):
        await asyncio.gather(*self._send_pipeline(conn),
                             return_exceptions=True)
        return await self._gather_result(return_exceptions) 
Example #25
Source File: registry.py    From backend.ai-manager with GNU Lesser General Public License v3.0
async def kill_all_sessions(self, conn=None):
        async with reenter_txn(self.dbpool, conn) as conn:
            query = (sa.select([agents.c.addr])
                       .where(agents.c.status == AgentStatus.ALIVE))
            result = await conn.execute(query)
            alive_agent_addrs = [row.addr for row in result]
            log.debug(str(alive_agent_addrs))
            tasks = [self.kill_all_sessions_in_agent(agent_addr)
                     for agent_addr in alive_agent_addrs]
            await asyncio.gather(*tasks) 
Example #26
Source File: pool.py    From aioredis with MIT License
async def _do_clear(self):
        waiters = []
        while self._pool:
            conn = self._pool.popleft()
            conn.close()
            waiters.append(conn.wait_closed())
        await asyncio.gather(*waiters) 
Example #27
Source File: pool.py    From aioredis with MIT License
async def _do_close(self):
        async with self._cond:
            assert not self._acquiring, self._acquiring
            waiters = []
            while self._pool:
                conn = self._pool.popleft()
                conn.close()
                waiters.append(conn.wait_closed())
            for conn in self._used:
                conn.close()
                waiters.append(conn.wait_closed())
            await asyncio.gather(*waiters)
            # TODO: close _pubsub_conn connection
            logger.debug("Closed %d connection(s)", len(waiters)) 
Example #28
Source File: registry.py    From backend.ai-manager with GNU Lesser General Public License v3.0
async def cleanup_agent_peers():
    global agent_peers
    closing_tasks = []
    for addr, peer in agent_peers.items():
        closing_tasks.append(peer.__aexit__(None, None, None))
    await asyncio.gather(*closing_tasks, return_exceptions=True) 
Example #29
Source File: integration_test.py    From aioredis with MIT License
async def blocking_pop(pool, val):

    async def lpush():
        with await pool as redis:
            # here v0.3 has bound connection, v1.0 does not;
            await asyncio.sleep(.1)
            await redis.lpush('list-key', 'val')

    async def blpop():
        with await pool as redis:
            # here v0.3 has bound connection, v1.0 does not;
            res = await redis.blpop(
                'list-key', timeout=2, encoding='utf-8')
            assert res == ['list-key', 'val'], res
    await asyncio.gather(blpop(), lpush()) 
Example #30
Source File: pool_test.py    From aioredis with MIT License
async def test_select_and_create(create_pool, server):
    # trying to model situation when select and acquire
    # called simultaneously
    # but acquire freezes on _wait_select and
    # then continues with proper db

    # TODO: refactor this test as there's no _wait_select any more.
    with async_timeout.timeout(10):
        pool = await create_pool(
            server.tcp_address,
            minsize=1, db=0,
            )
        db = 0
        while True:
            db = (db + 1) & 1
            _, conn = await asyncio.gather(pool.select(db),
                                           pool.acquire())
            assert pool.db == db
            pool.release(conn)
            if conn.db == db:
                break
    # await asyncio.wait_for(test(), 3, loop=loop)