Python typing.AsyncGenerator() Examples

The following are 30 code examples of typing.AsyncGenerator(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module typing, or try the search function.
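All of these examples annotate async generator functions, where AsyncGenerator[YieldType, SendType] names the type of values the generator yields and the type it accepts via asend() (None when nothing is sent in). A minimal sketch of the pattern:

import asyncio
from typing import AsyncGenerator

async def countdown(n: int) -> AsyncGenerator[int, None]:
    # Yields ints; SendType is None because nothing is sent in.
    while n > 0:
        yield n
        n -= 1

async def main() -> None:
    async for value in countdown(3):
        print(value)  # 3, 2, 1

asyncio.run(main())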
Example #1
Source File: test_helpers.py    From quart with MIT License    7 votes
async def test_stream_with_context() -> None:
    app = Quart(__name__)

    @app.route("/")
    async def index() -> AsyncGenerator[bytes, None]:
        @stream_with_context
        async def generator() -> AsyncGenerator[bytes, None]:
            yield request.method.encode()
            yield b" "
            yield request.path.encode()

        return generator()

    test_client = app.test_client()
    response = await test_client.get("/")
    result = await response.get_data(raw=True)
    assert result == b"GET /"  # type: ignore 
Example #2
Source File: master.py    From bandersnatch with Academic Free License v3.0    6 votes
async def get(
        self, path: str, required_serial: Optional[int], **kw: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        logger.debug(f"Getting {path} (serial {required_serial})")
        if not path.startswith(("https://", "http://")):
            path = self.url + path

        async with self.session.get(path, **kw) as r:
            got_serial = (
                int(r.headers[PYPI_SERIAL_HEADER])
                if PYPI_SERIAL_HEADER in r.headers
                else None
            )
            await self.check_for_stale_cache(path, required_serial, got_serial)
            yield r

    # TODO: Add storage backend support / refactor - #554 
Example #3
Source File: base.py    From python-prompt-toolkit with BSD 3-Clause "New" or "Revised" License    5 votes
async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:

        # Get all completions from the other completers in a blocking way.
        for completer in self.completers:
            async for item in completer.get_completions_async(document, complete_event):
                yield item 
Example #4
Source File: local_schema.py    From gql with MIT License    5 votes
async def subscribe(
        self, document: DocumentNode, *args, **kwargs,
    ) -> AsyncGenerator[ExecutionResult, None]:
        """Send a subscription and receive the results using an async generator

        The results are sent as an ExecutionResult object
        """

        subscribe_result = await subscribe(self.schema, document, *args, **kwargs)

        if isinstance(subscribe_result, ExecutionResult):
            yield subscribe_result

        else:
            async for result in subscribe_result:
                yield result 
Example #5
Source File: contexts.py    From website with MIT License    5 votes
async def apscheduler(app: web.Application) -> AsyncGenerator[None, None]:
    scheduler = AsyncIOScheduler()
    _register_in_app(app, "scheduler", scheduler)
    scheduler.start()
    yield

    if scheduler.running:
        scheduler.shutdown() 
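A generator of this shape is typically registered as an aiohttp cleanup context: everything before the yield runs at application startup, everything after it at shutdown. A hedged usage sketch (the wiring below is an assumption, not shown in the source):

from aiohttp import web

app = web.Application()
# Startup runs apscheduler() up to the yield; shutdown runs the rest.
app.cleanup_ctx.append(apscheduler)
web.run_app(app)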
Example #6
Source File: pipeline.py    From chia-blockchain with Apache License 2.0    5 votes
async def connection_to_outbound(
    pair: Tuple[Connection, PeerConnections],
) -> AsyncGenerator[Tuple[Connection, OutboundMessage, PeerConnections], None]:
    """
    Async generator which calls the on_connect async generator method, and yields any outbound messages.
    """
    connection, global_connections = pair
    if connection.on_connect:
        async for outbound_message in connection.on_connect():
            yield connection, outbound_message, global_connections 
Example #7
Source File: introducer.py    From chia-blockchain with Apache License 2.0    5 votes
async def request_peers(
        self, request: RequestPeers
    ) -> AsyncGenerator[OutboundMessage, None]:
        max_peers = self.max_peers_to_send
        rawpeers = self.global_connections.peers.get_peers(
            max_peers * 2, True, self.recent_peer_threshold
        )

        peers = []

        for peer in rawpeers:
            if peer.get_hash() not in self.vetted:
                try:
                    r, w = await asyncio.open_connection(peer.host, int(peer.port))
                    w.close()
                except (
                    ConnectionRefusedError,
                    TimeoutError,
                    OSError,
                    asyncio.TimeoutError,
                ) as e:
                    log.warning(f"Could not vet {peer}. {type(e)}{str(e)}")
                    self.vetted[peer.get_hash()] = False
                    continue

                log.info(f"Have vetted {peer} successfully!")
                self.vetted[peer.get_hash()] = True

            if self.vetted[peer.get_hash()]:
                peers.append(peer)

            if len(peers) >= max_peers:
                break

        log.info(f"Sending vetted {peers}")

        msg = Message("respond_peers", RespondPeers(peers))
        yield OutboundMessage(NodeType.FULL_NODE, msg, Delivery.RESPOND)
        yield OutboundMessage(NodeType.WALLET, msg, Delivery.RESPOND) 
Example #8
Source File: workspacefs.py    From parsec-cloud with GNU Affero General Public License v3.0    5 votes
async def iterdir(self, path: AnyPath) -> AsyncGenerator[FsPath, None]:
        """
        Raises:
            FSError
        """
        path = FsPath(path)
        info = await self.transactions.entry_info(path)
        if "children" not in info:
            raise FSNotADirectoryError(filename=str(path))
        for child in info["children"]:
            yield path / child 
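Because iterdir is an async generator, callers iterate it with async for rather than awaiting it. A minimal consumption sketch (the workspace object is hypothetical):

async def print_children(workspace, path) -> None:
    # Each iteration lazily yields one child path.
    async for child in workspace.iterdir(path):
        print(child)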
Example #9
Source File: pool.py    From ant_nest with GNU Lesser General Public License v3.0    5 votes
async def as_completed_with_async(
        self,
        coros: typing.Iterable[typing.Awaitable],
        limit: int = 50,
        raise_exception: bool = True,
    ) -> typing.AsyncGenerator[typing.Any, None]:
        """as_completed`s async version, can catch and log exception inside.
        """
        for coro in self.as_completed(coros, limit=limit):
            try:
                yield await coro
            except Exception as e:
                if raise_exception:
                    raise e
                else:
                    self.logger.exception(
                        "Get exception {:s} in "
                        '"as_completed_with_async"'.format(str(e))
                    ) 
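Consumption mirrors asyncio.as_completed, except results arrive through async for. A minimal sketch with hypothetical fetch() and handle() helpers:

async def crawl(pool, urls) -> None:
    coros = (fetch(url) for url in urls)  # fetch() is assumed
    # At most 50 coroutines run concurrently; results arrive as they finish.
    async for result in pool.as_completed_with_async(coros, limit=50):
        handle(result)  # handle() is assumed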
Example #10
Source File: impl.py    From tartiflette-asgi with MIT License    5 votes
def get_stream(self, opid: str, payload: dict) -> typing.AsyncGenerator:
        context = {**payload.get("context", {}), **self.context}
        return self.engine.subscribe(
            query=payload.get("query"),
            variables=payload.get("variables"),
            operation_name=payload.get("operationName"),
            context=context,
        ) 
Example #11
Source File: protocol.py    From tartiflette-asgi with MIT License    5 votes
def __init__(self) -> None:
        self._operations: typing.Dict[str, typing.AsyncGenerator] = {}

    # Methods whose implementation is left to the implementer. 
Example #12
Source File: protocol.py    From tartiflette-asgi with MIT License    5 votes
def get_stream(self, opid: str, payload: dict) -> typing.AsyncGenerator:
        raise NotImplementedError

    # Helpers. 
Example #13
Source File: base_async_client.py    From py-stellar-base with Apache License 2.0    5 votes
async def stream(
        self, url: str, params: Dict[str, str] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Creates an EventSource that listens for incoming messages from the server.

        See `Horizon Response Format <https://www.stellar.org/developers/horizon/reference/responses.html>`_

        See `MDN EventSource <https://developer.mozilla.org/en-US/docs/Web/API/EventSource>`_

        :param url: the request url
        :param params: the request params
        :return: a dict AsyncGenerator for server response
        :raise: :exc:`ConnectionError <stellar_sdk.exceptions.ConnectionError>`
        """
        pass 
Example #14
Source File: base.py    From python-prompt-toolkit with BSD 3-Clause "New" or "Revised" License    5 votes
async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        """
        Asynchronous generator for completions. (Probably, you won't have to
        override this.)

        Asynchronous generator of :class:`.Completion` objects.
        """
        for item in self.get_completions(document, complete_event):
            yield item 
Example #15
Source File: base.py    From python-prompt-toolkit with BSD 3-Clause "New" or "Revised" License    5 votes
async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        """
        Asynchronous generator of completions.
        """
        async for completion in generator_to_async_generator(
            lambda: self.completer.get_completions(document, complete_event)
        ):
            yield completion 
Example #16
Source File: base.py    From python-prompt-toolkit with BSD 3-Clause "New" or "Revised" License    5 votes
async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        completer = self.get_completer() or DummyCompleter()

        async for completion in completer.get_completions_async(
            document, complete_event
        ):
            yield completion 
Example #17
Source File: client.py    From gql with MIT License    5 votes
async def _subscribe(
        self, document: DocumentNode, *args, **kwargs
    ) -> AsyncGenerator[ExecutionResult, None]:

        # Fetch schema from transport if needed and validate document if possible
        await self.fetch_and_validate(document)

        # Subscribe to the transport
        inner_generator: AsyncGenerator[
            ExecutionResult, None
        ] = self.transport.subscribe(document, *args, **kwargs)

        # Keep a reference to the inner generator to allow the user to call aclose()
        # before a break if python version is too old (pypy3 py 3.6.1)
        self._generator = inner_generator

        async for result in inner_generator:
            if result.errors:
                # Note: we need to run generator.aclose() here or the finally block in
                # transport.subscribe will not be reached in pypy3 (py 3.6.1)
                await inner_generator.aclose()

            yield result 
Example #18
Source File: test_typing.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0    5 votes
def test_async_generator(self):
        ns = {}
        exec("async def f():\n"
             "    yield 42\n", globals(), ns)
        g = ns['f']()
        self.assertIsSubclass(type(g), typing.AsyncGenerator) 
Example #19
Source File: test_typing.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0    5 votes
def test_no_async_generator_instantiation(self):
        with self.assertRaises(TypeError):
            typing.AsyncGenerator()
        with self.assertRaises(TypeError):
            typing.AsyncGenerator[T, T]()
        with self.assertRaises(TypeError):
            typing.AsyncGenerator[int, int]() 
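The alias cannot be instantiated because it only describes an interface, but the unsubscripted form still works for runtime checks, since typing.AsyncGenerator aliases collections.abc.AsyncGenerator. A minimal sketch:

import typing

async def numbers() -> typing.AsyncGenerator[int, None]:
    yield 1

gen = numbers()
# Runtime isinstance checks against the unsubscripted alias work...
assert isinstance(gen, typing.AsyncGenerator)
# ...but, as the test above shows, calling the alias raises TypeError.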
Example #20
Source File: eth1_monitor.py    From trinity with MIT License    5 votes
async def _new_blocks(self) -> AsyncGenerator[Eth1Block, None]:
        """
        Keep polling latest blocks, and yield the blocks whose number is
        `latest_block.number - self._num_blocks_confirmed`.
        """
        while True:
            try:
                block = self._eth1_data_provider.get_block("latest")
            except BlockNotFound:
                raise Eth1MonitorValidationError("Fail to get latest block")
            target_block_number = BlockNumber(block.number - self._num_blocks_confirmed)
            from_block_number = self.highest_processed_block_number
            if target_block_number > from_block_number:
                # From `highest_processed_block_number` to `target_block_number`
                for block_number in range(
                    from_block_number + 1, target_block_number + 1
                ):
                    try:
                        block = self._eth1_data_provider.get_block(
                            BlockNumber(block_number)
                        )
                    except BlockNotFound:
                        raise Eth1MonitorValidationError(
                            f"Block does not exist for block number={block_number}"
                        )
                    yield block
            await trio.sleep(self._polling_period) 
Example #21
Source File: _utils.py    From trinity with MIT License    5 votes
def __init__(self, aiter: AsyncGenerator[TCo, TContra]) -> None:
        self._aiter = aiter 
Example #22
Source File: _utils.py    From trinity with MIT License    5 votes
async def __aenter__(self) -> AsyncGenerator[TCo, TContra]:
        return self._aiter 
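Taken together, Examples #21 and #22 wrap an async generator so it can be handed out through async with; the class presumably also defines __aexit__ to close the generator. A minimal sketch of the full wrapper (the class name and the __aexit__ body are assumptions):

from typing import AsyncGenerator, TypeVar

TCo = TypeVar("TCo", covariant=True)
TContra = TypeVar("TContra", contravariant=True)

class AsyncGenWrapper:
    def __init__(self, aiter: AsyncGenerator[TCo, TContra]) -> None:
        self._aiter = aiter

    async def __aenter__(self) -> AsyncGenerator[TCo, TContra]:
        return self._aiter

    async def __aexit__(self, exc_type, exc, tb) -> None:
        # Assumed cleanup: close the generator so its finally blocks run.
        await self._aiter.aclose()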
Example #23
Source File: base.py    From lightbus with Apache License 2.0    5 votes
async def history(
        self,
        api_name,
        event_name,
        start: datetime = None,
        stop: datetime = None,
        start_inclusive: bool = True,
    ) -> AsyncGenerator[EventMessage, None]:
        """Return EventMessages for the given api/event names during the (optionally) given date range.

        Should return newest messages first
        """
        raise NotImplementedError(
            f"Event transport {self.__class__.__name__} does not support event history."
        ) 
Example #24
Source File: debug.py    From lightbus with Apache License 2.0    5 votes
async def consume(
        self, listen_for: List[Tuple[str, str]], listener_name: str, **kwargs
    ) -> AsyncGenerator[EventMessage, None]:
        """Consume RPC events for the given API"""
        self._sanity_check_listen_for(listen_for)

        logger.info("⌛ Faking listening for events {}.".format(self._events))

        while True:
            await asyncio.sleep(0.1)
            yield [self._get_fake_message()] 
Example #25
Source File: debug.py    From lightbus with Apache License 2.0    5 votes
async def history(
        self,
        api_name,
        event_name,
        start: datetime = None,
        stop: datetime = None,
        start_inclusive: bool = True,
    ) -> AsyncGenerator[EventMessage, None]:
        yield self._get_fake_message() 
Example #26
Source File: request.py    From bocadillo with MIT License    5 votes
async def __aiter__(self) -> typing.AsyncGenerator[bytes, None]:
        async for chunk in self.stream():
            yield chunk 
Example #27
Source File: test_db_tortoise.py    From fastapi-users with MIT License    5 votes
async def tortoise_user_db() -> AsyncGenerator[TortoiseUserDatabase, None]:
    DATABASE_URL = "sqlite://./test-tortoise-user.db"

    await Tortoise.init(
        db_url=DATABASE_URL, modules={"models": ["tests.test_db_tortoise"]}
    )
    await Tortoise.generate_schemas()

    yield TortoiseUserDatabase(UserDB, User)

    await User.all().delete()
    await Tortoise.close_connections() 
Example #28
Source File: test_db_tortoise.py    From fastapi-users with MIT License    5 votes
async def tortoise_user_db_oauth() -> AsyncGenerator[TortoiseUserDatabase, None]:
    DATABASE_URL = "sqlite://./test-tortoise-user-oauth.db"

    await Tortoise.init(
        db_url=DATABASE_URL, modules={"models": ["tests.test_db_tortoise"]}
    )
    await Tortoise.generate_schemas()

    yield TortoiseUserDatabase(UserDBOAuth, User, OAuthAccount)

    await User.all().delete()
    await Tortoise.close_connections() 
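Both functions read like pytest async fixtures whose @pytest.fixture decorator the listing has dropped; under that assumption (and with a plugin such as pytest-asyncio installed), tests receive the yielded database object by parameter name, and the lines after the yield run as teardown:

import pytest

@pytest.mark.asyncio
async def test_smoke(tortoise_user_db):  # hypothetical test
    # pytest injects the TortoiseUserDatabase yielded by the fixture;
    # the delete/close calls after the yield run once the test finishes.
    assert tortoise_user_db is not None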
Example #29
Source File: paralleltransfer.py    From tgfilestream with GNU Affero General Public License v3.0    5 votes
async def _int_download(self, request: GetFileRequest, first_part: int, last_part: int,
                        part_count: int, part_size: int, dc_id: int, first_part_cut: int,
                        last_part_cut: int) -> AsyncGenerator[bytes, None]:
        log = self.log
        try:
            part = first_part
            dcm = self.dc_managers[dc_id]
            async with dcm.get_connection() as conn:
                log = conn.log
                while part <= last_part:
                    result = await conn.sender.send(request)
                    request.offset += part_size
                    if part == first_part:
                        yield result.bytes[first_part_cut:]
                    elif part == last_part:
                        yield result.bytes[:last_part_cut]
                    else:
                        yield result.bytes
                    log.debug(f"Part {part}/{last_part} (total {part_count}) downloaded")
                    part += 1
                log.debug("Parallel download finished")
        except (GeneratorExit, StopAsyncIteration, asyncio.CancelledError):
            log.debug("Parallel download interrupted")
            raise
        except Exception:
            log.debug("Parallel download errored", exc_info=True) 
Example #30
Source File: paralleltransfer.py    From tgfilestream with GNU Affero General Public License v3.0    5 votes
def download(self, file: TypeLocation, file_size: int, offset: int, limit: int
                 ) -> AsyncGenerator[bytes, None]:
        dc_id, location = utils.get_input_location(file)
        part_size = 512 * 1024
        first_part_cut = offset % part_size
        first_part = math.floor(offset / part_size)
        last_part_cut = part_size - (limit % part_size)
        last_part = math.ceil(limit / part_size)
        part_count = math.ceil(file_size / part_size)
        self.log.debug(f"Starting parallel download: chunks {first_part}-{last_part}"
                       f" of {part_count} {location!s}")
        request = GetFileRequest(location, offset=first_part * part_size, limit=part_size)

        return self._int_download(request, first_part, last_part, part_count, part_size, dc_id,
                                  first_part_cut, last_part_cut)
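Note that download itself is a plain function: it returns the async generator created by _int_download, so callers iterate its return value directly. A minimal consumption sketch (transfer, file, and send_to_client are hypothetical):

async def serve(transfer, file, size: int) -> None:
    # Stream the whole file in ~512 KiB chunks to some consumer.
    async for chunk in transfer.download(file, size, offset=0, limit=size):
        await send_to_client(chunk)  # send_to_client() is assumed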