Python typing.Deque() Examples

The following are nine code examples of typing.Deque, collected from open-source projects. Each example notes its source file, the project it comes from, and that project's license. You may also want to look at the other functions and classes that the typing module provides.
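
Before diving in, here is a minimal self-contained sketch of how typing.Deque is used (the names below are illustrative, not from any of the projects). typing.Deque is the generic alias for collections.deque; on Python 3.9+ collections.deque can be subscripted directly, but typing.Deque is what these examples use.

from collections import deque
from typing import Deque

# Deque[int] annotates a collections.deque whose elements are ints.
recent_scores: Deque[int] = deque(maxlen=3)

for score in [10, 20, 30, 40]:
    recent_scores.append(score)  # with maxlen=3, the oldest entry is dropped

print(recent_scores)  # deque([20, 30, 40], maxlen=3)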
Example #1
Source File: help_channels.py    From bot with MIT License
def __init__(self, bot: Bot):
    super().__init__()

    self.bot = bot

    # Categories
    self.available_category: discord.CategoryChannel = None
    self.in_use_category: discord.CategoryChannel = None
    self.dormant_category: discord.CategoryChannel = None

    # Queues
    self.channel_queue: asyncio.Queue[discord.TextChannel] = None
    self.name_queue: t.Deque[str] = None

    self.name_positions = self.get_names()
    self.last_notification: t.Optional[datetime] = None

    # Asyncio stuff
    self.queue_tasks: t.List[asyncio.Task] = []
    self.ready = asyncio.Event()
    self.on_message_lock = asyncio.Lock()
    self.init_task = self.bot.loop.create_task(self.init_cog())
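
Note that name_queue is annotated as t.Deque[str] but initialized to None, which a strict type checker would flag. A stricter variant (not from the bot project; the helper name is hypothetical) makes the deferred initialization explicit:

import typing as t
from collections import deque

# Optional records that the queue does not exist until it is populated.
name_queue: t.Optional[t.Deque[str]] = None

def create_name_queue(names: t.Iterable[str]) -> t.Deque[str]:
    # Hypothetical helper: builds the deque the annotation promises.
    return deque(names)

name_queue = create_name_queue(['ada', 'grace', 'alan'])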
Example #2
Source File: checkpoint.py    From torchgpipe with Apache License 2.0
def save_rng_states(device: torch.device,
                    rng_states: Deque[RNGStates],
                    ) -> None:
    """:meth:`Checkpoint.forward` captures the current PyTorch's random number
    generator states at CPU and GPU to reuse in :meth:`Recompute.backward`.

    .. seealso:: :ref:`Referential Transparency`

    """
    cpu_rng_state = torch.get_rng_state()

    gpu_rng_state: Optional[ByteTensor]
    if device.type == 'cuda':
        gpu_rng_state = torch.cuda.get_rng_state(device)
    else:
        gpu_rng_state = None

    rng_states.append((cpu_rng_state, gpu_rng_state)) 
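
The RNGStates alias is defined elsewhere in checkpoint.py; judging from the append above, it pairs the CPU RNG state with an optional GPU RNG state. A hedged sketch of declaring the deque and calling the function, assuming save_rng_states from this example is in scope and the device is CPU-only:

from collections import deque
from typing import Deque, Optional, Tuple

import torch

# Assumed shape of the alias, inferred from the append() call above.
RNGStates = Tuple[torch.Tensor, Optional[torch.Tensor]]

rng_states: Deque[RNGStates] = deque()
save_rng_states(torch.device('cpu'), rng_states)
assert rng_states[-1][1] is None  # no GPU state captured for a CPU device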
Example #3
Source File: copy.py    From torchgpipe with Apache License 2.0
def backward(ctx: Context,
             *grad_output: Tensor,
             ) -> Tuple[Optional[Tensor], ...]:
    prev_stream = ctx.prev_stream
    next_stream = ctx.next_stream

    grad_input: Deque[Tensor] = deque(maxlen=len(grad_output))
    input_stream = current_stream(get_device(prev_stream))

    with use_stream(prev_stream), use_stream(next_stream):
        for x in reversed(grad_output):
            y = x.to(get_device(prev_stream))
            grad_input.appendleft(y)

            # 'next_stream' is not where 'x' has been allocated.
            record_stream(x, next_stream)
            # 'y' has been allocated on 'prev_stream'.
            # It might be used on the current stream captured as 'input_stream'.
            record_stream(y, input_stream)

    grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
    return grad_streams + tuple(grad_input)
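
The appendleft calls undo the reversed iteration: walking grad_output back to front while prepending leaves the deque in the original order. The same pattern in isolation, without the CUDA stream bookkeeping:

from collections import deque
from typing import Deque

grad_output = ['g0', 'g1', 'g2']

grad_input: Deque[str] = deque(maxlen=len(grad_output))
for x in reversed(grad_output):
    y = x.upper()             # stands in for the device copy x.to(...)
    grad_input.appendleft(y)  # prepend, so the original order is preserved

print(tuple(grad_input))  # ('G0', 'G1', 'G2')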
Example #4
Source File: checkpoint.py    From torchgpipe with Apache License 2.0
def restore_rng_states(device: torch.device,
                       rng_states: Deque[RNGStates],
                       ) -> Generator[None, None, None]:
    """:meth:`Recompute.backward` restores the random number generator states
    captured by :func:`save_rng_states` within its context.

    .. seealso:: :ref:`Referential Transparency`

    """
    cpu_rng_state, gpu_rng_state = rng_states.pop()

    gpu_devices: List[torch.device] = []
    if device.type == 'cuda':
        gpu_devices.append(device)

    with torch.random.fork_rng(gpu_devices):
        torch.set_rng_state(cpu_rng_state)
        if gpu_rng_state is not None:
            torch.cuda.set_rng_state(gpu_rng_state, device)
        yield 
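
Because restore_rng_states yields exactly once, it is presumably decorated with contextlib.contextmanager in the source, as its docstring implies. Paired with save_rng_states, the deque behaves as a LIFO stack of captured states; a hedged usage sketch on CPU, assuming both functions from these examples are in scope:

from collections import deque
from typing import Deque

import torch

rng_states: Deque = deque()
device = torch.device('cpu')

save_rng_states(device, rng_states)           # push the current RNG state
expected = torch.randn(3)                     # consume some randomness
with restore_rng_states(device, rng_states):  # pop and reinstate the state
    assert torch.equal(torch.randn(3), expected)  # same draws reproduced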
Example #5
Source File: checkpoint.py    From torchgpipe with Apache License 2.0
def forward(ctx: Context,  # type: ignore
            phony: Tensor,
            recomputed: Deque[Recomputed],
            rng_states: Deque[RNGStates],
            function: Function,
            input_atomic: bool,
            *input: Tensor,
            ) -> TensorOrTensors:
    ctx.recomputed = recomputed
    ctx.rng_states = rng_states

    save_rng_states(input[0].device, ctx.rng_states)

    ctx.function = function
    ctx.input_atomic = input_atomic
    ctx.save_for_backward(*input)

    with torch.no_grad(), enable_checkpointing():
        output = function(input[0] if input_atomic else input)

    return output
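
recomputed and rng_states appear to be deques shared between this forward pass and the later backward pass: forward appends captured state, backward pops it. The handoff pattern reduced to its essentials (names are illustrative, not torchgpipe's API):

from collections import deque
from typing import Any, Deque, Tuple

rng_states: Deque[Tuple[Any, Any]] = deque()

def producer() -> None:
    rng_states.append(('cpu_state', None))  # forward pushes captured state

def consumer() -> None:
    cpu_state, gpu_state = rng_states.pop()  # backward pops in LIFO order

producer()
consumer()
assert not rng_states  # every push is matched by exactly one pop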
Example #6
Source File: checkpoint.py    From torchgpipe with Apache License 2.0
def forward(ctx: Context,  # type: ignore
            phony: Tensor,
            recomputed: Deque[Recomputed],
            rng_states: Deque[RNGStates],
            function: Function,
            input_atomic: bool,
            *input: Tensor,
            ) -> Tensor:
    ctx.recomputed = recomputed
    ctx.rng_states = rng_states

    ctx.function = function
    ctx.input_atomic = input_atomic
    ctx.save_for_backward(*input)

    return phony
Example #7
Source File: main.py    From grpclib with BSD 3-Clause "New" or "Revised" License
def _type_names(
    proto_file: FileDescriptorProto,
    message_type: DescriptorProto,
    parents: Optional[Deque[str]] = None,
) -> Iterator[Tuple[str, str]]:
    if parents is None:
        parents = deque()

    proto_name_parts = ['']
    if proto_file.package:
        proto_name_parts.append(proto_file.package)
    proto_name_parts.extend(parents)
    proto_name_parts.append(message_type.name)

    py_name_parts = [_proto2pb2_module_name(proto_file.name)]
    py_name_parts.extend(parents)
    py_name_parts.append(message_type.name)

    yield '.'.join(proto_name_parts), '.'.join(py_name_parts)

    parents.append(message_type.name)
    for nested in message_type.nested_type:
        yield from _type_names(proto_file, nested, parents=parents)
    parents.pop() 
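
The parents deque works as an explicit stack for the recursion: each level pushes its own name before descending into nested types and pops afterwards, so sibling messages see the same prefix. The same pattern in miniature:

from collections import deque
from typing import Deque, Dict, Iterator, Optional

Tree = Dict[str, 'Tree']

def walk(node: Tree, parents: Optional[Deque[str]] = None) -> Iterator[str]:
    if parents is None:
        parents = deque()
    for name, children in node.items():
        yield '.'.join([*parents, name])
        parents.append(name)  # push before recursing into children
        yield from walk(children, parents)
        parents.pop()         # pop so siblings share the same prefix

tree: Tree = {'Outer': {'Middle': {'Inner': {}}, 'Other': {}}}
print(list(walk(tree)))
# ['Outer', 'Outer.Middle', 'Outer.Middle.Inner', 'Outer.Other']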
Example #8
Source File: fixtures.py    From pytest with MIT License
def reorder_items(items: "Sequence[nodes.Item]") -> "List[nodes.Item]":
    argkeys_cache = {}  # type: Dict[int, Dict[nodes.Item, Dict[_Key, None]]]
    items_by_argkey = {}  # type: Dict[int, Dict[_Key, Deque[nodes.Item]]]
    for scopenum in range(0, scopenum_function):
        d = {}  # type: Dict[nodes.Item, Dict[_Key, None]]
        argkeys_cache[scopenum] = d
        item_d = defaultdict(deque)  # type: Dict[_Key, Deque[nodes.Item]]
        items_by_argkey[scopenum] = item_d
        for item in items:
            # cast is a workaround for https://github.com/python/typeshed/issues/3800.
            keys = cast(
                "Dict[_Key, None]",
                order_preserving_dict.fromkeys(
                    get_parametrized_fixture_keys(item, scopenum), None
                ),
            )
            if keys:
                d[item] = keys
                for key in keys:
                    item_d[key].append(item)
    # cast is a workaround for https://github.com/python/typeshed/issues/3800.
    items_dict = cast(
        "Dict[nodes.Item, None]", order_preserving_dict.fromkeys(items, None)
    )
    return list(reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, 0)) 
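
defaultdict(deque) materializes an empty deque the first time a key is touched, which the type comment then describes as Dict[_Key, Deque[nodes.Item]]. The bucketing idiom on its own:

from collections import defaultdict, deque
from typing import DefaultDict, Deque

items_by_key: DefaultDict[str, Deque[int]] = defaultdict(deque)

for key, item in [('a', 1), ('b', 2), ('a', 3)]:
    items_by_key[key].append(item)  # first access for a key creates its deque

print(dict(items_by_key))  # {'a': deque([1, 3]), 'b': deque([2])}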
Example #9
Source File: helper_functions.py    From rl_algorithms with MIT License
def get_n_step_info_from_demo(
    demo: List, n_step: int, gamma: float
) -> Tuple[List, List]:
    """Return 1 step and n step demos."""
    assert n_step > 1

    demos_1_step = list()
    demos_n_step = list()
    n_step_buffer: Deque = deque(maxlen=n_step)

    for transition in demo:
        n_step_buffer.append(transition)

        if len(n_step_buffer) == n_step:
            # add a single step transition
            demos_1_step.append(n_step_buffer[0])

            # add a multi step transition
            curr_state, action = n_step_buffer[0][:2]
            reward, next_state, done = get_n_step_info(n_step_buffer, gamma)
            transition = (curr_state, action, reward, next_state, done)
            demos_n_step.append(transition)

    return demos_1_step, demos_n_step
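
The deque(maxlen=n_step) acts as a sliding window over the demo: once it is full, each append silently drops the oldest transition, so the buffer always holds the latest n_step consecutive transitions. The window behaviour in isolation:

from collections import deque
from typing import Deque

n_step = 3
window: Deque[int] = deque(maxlen=n_step)

for t in range(5):
    window.append(t)  # once full, the oldest element falls off the left
    if len(window) == n_step:
        print(window[0], list(window))
# prints: 0 [0, 1, 2]  then  1 [1, 2, 3]  then  2 [2, 3, 4]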