Python time.monotonic() Examples

The following are 30 code examples of time.monotonic(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the time module, or try the search function.
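time.monotonic() returns the value of a clock that cannot go backwards and is not affected by system clock updates, so only the difference between two readings is meaningful. Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the most common pattern, timing a block of code; the sleep stands in for real work:

import time

start = time.monotonic()              # the absolute value has no defined meaning
time.sleep(0.25)                      # stand-in for the work being timed
elapsed = time.monotonic() - start    # only differences between readings matter
print(f"elapsed: {elapsed:.3f} s")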
Example #1
Source File: core.py    From schemathesis with MIT License
def execute(self) -> Generator[events.ExecutionEvent, None, None]:
        """Common logic for all runners."""
        results = TestResultSet()

        initialized = events.Initialized.from_schema(schema=self.schema)
        yield initialized

        for event in self._execute(results):
            if (
                self.exit_first
                and isinstance(event, events.AfterExecution)
                and event.status in (Status.error, Status.failure)
            ):
                break
            yield event

        yield events.Finished.from_results(results=results, running_time=time.monotonic() - initialized.start_time) 
Example #2
Source File: wait.py    From misp42splunk with GNU Lesser General Public License v3.0
def _retry_on_intr(fn, timeout):
        if timeout is None:
            deadline = float("inf")
        else:
            deadline = monotonic() + timeout

        while True:
            try:
                return fn(timeout)
            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
            except (OSError, select.error) as e:
                # 'e.args[0]' incantation works for both OSError and select.error
                if e.args[0] != errno.EINTR:
                    raise
                else:
                    timeout = deadline - monotonic()
                    if timeout < 0:
                        timeout = 0
                    if timeout == float("inf"):
                        timeout = None
                    continue 
Example #3
Source File: subprocess.py    From jawfish with MIT License
def _remaining_time(self, endtime):
        """Convenience for _communicate when computing timeouts."""
        if endtime is None:
            return None
        else:
            return endtime - _time() 
Example #4
Source File: wait.py    From gist-alfred with MIT License
def _retry_on_intr(fn, timeout):
        if timeout is None:
            deadline = float("inf")
        else:
            deadline = monotonic() + timeout

        while True:
            try:
                return fn(timeout)
            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
            except (OSError, select.error) as e:
                # 'e.args[0]' incantation works for both OSError and select.error
                if e.args[0] != errno.EINTR:
                    raise
                else:
                    timeout = deadline - monotonic()
                    if timeout < 0:
                        timeout = 0
                    if timeout == float("inf"):
                        timeout = None
                    continue 
Example #5
Source File: threading.py    From jawfish with MIT License
def wait_for(self, predicate, timeout=None):
        """Wait until a condition evaluates to True.

        predicate should be a callable whose result will be interpreted as a
        boolean value.  A timeout may be provided giving the maximum time to
        wait.

        """
        endtime = None
        waittime = timeout
        result = predicate()
        while not result:
            if waittime is not None:
                if endtime is None:
                    endtime = _time() + waittime
                else:
                    waittime = endtime - _time()
                    if waittime <= 0:
                        break
            self.wait(waittime)
            result = predicate()
        return result 
Example #6
Source File: dialog_test_logger.py    From botbuilder-python with MIT License
def __init__(
        self,
        log_func: Callable[..., None] = None,
        json_indent: int = 4,
        time_func: Callable[[], float] = None,
    ):
        """
        Initialize a new instance of the dialog test logger.

        :param log_func: A callable method or object that can log a message,
        default to `logging.getLogger(__name__).info`.
        :type log_func: Callable[..., None]
        :param json_indent: An indent for json output, default indent is 4.
        :type json_indent: int
        :param time_func: A time function to record time spans, default to `time.monotonic`.
        :type time_func: Callable[[], float]
        """
        self._log = logging.getLogger(__name__).info if log_func is None else log_func
        self._stopwatch_state_key = f"stopwatch.{uuid.uuid4()}"
        self._json_indent = json_indent
        self._time_func = time.monotonic if time_func is None else time_func 
Example #7
Source File: wait.py    From misp42splunk with GNU Lesser General Public License v3.0
def _retry_on_intr(fn, timeout):
        if timeout is None:
            deadline = float("inf")
        else:
            deadline = monotonic() + timeout

        while True:
            try:
                return fn(timeout)
            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
            except (OSError, select.error) as e:
                # 'e.args[0]' incantation works for both OSError and select.error
                if e.args[0] != errno.EINTR:
                    raise
                else:
                    timeout = deadline - monotonic()
                    if timeout < 0:
                        timeout = 0
                    if timeout == float("inf"):
                        timeout = None
                    continue 
Example #8
Source File: throttle.py    From qutebrowser with GNU General Public License v3.0
def __call__(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
        cur_time_ms = int(time.monotonic() * 1000)
        if self._pending_call is None:
            if (self._last_call_ms is None or
                    cur_time_ms - self._last_call_ms > self._delay_ms):
                # Call right now
                self._last_call_ms = cur_time_ms
                self._func(*args, **kwargs)
                return

            self._timer.setInterval(self._delay_ms -
                                    (cur_time_ms - self._last_call_ms))
            # Disconnect any existing calls, continue if no connections.
            try:
                self._timer.timeout.disconnect()
            except TypeError:
                pass
            self._timer.timeout.connect(self._call_pending)
            self._timer.start()

        # Update arguments for an existing pending call
        self._pending_call = _CallArgs(args=args, kwargs=kwargs) 
Example #9
Source File: thread_util.py    From recruit with Apache License 2.0
def acquire(self, blocking=True, timeout=None):
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        rc = False
        endtime = None
        self._cond.acquire()
        while self._value == 0:
            if not blocking:
                break
            if timeout is not None:
                if endtime is None:
                    endtime = _time() + timeout
                else:
                    timeout = endtime - _time()
                    if timeout <= 0:
                        break
            self._cond.wait(timeout)
        else:
            self._value = self._value - 1
            rc = True
        self._cond.release()
        return rc 
Example #10
Source File: core.py    From schemathesis with MIT License
def _wsgi_test(
    case: Case,
    checks: Iterable[CheckFunction],
    targets: Iterable[Target],
    result: TestResult,
    headers: Dict[str, Any],
    store_interactions: bool,
    feedback: Feedback,
) -> WSGIResponse:
    # pylint: disable=too-many-arguments
    with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as recorded:
        start = time.monotonic()
        response = case.call_wsgi(headers=headers)
        elapsed = time.monotonic() - start
    run_targets(targets, elapsed)
    if store_interactions:
        result.store_wsgi_response(case, response, headers, elapsed)
    result.logs.extend(recorded.records)
    run_checks(case, checks, result, response)
    feedback.add_test_case(case, response)
    return response 
Example #11
Source File: backoff.py    From discord.py with MIT License
def delay(self):
        """Compute the next delay

        Returns the next delay to wait according to the exponential
        backoff algorithm.  This is a value between 0 and base * 2^exp
        where exponent starts off at 1 and is incremented at every
        invocation of this method up to a maximum of 10.

        If a period of more than base * 2^11 has passed since the last
        retry, the exponent is reset to 1.
        """
        invocation = time.monotonic()
        interval = invocation - self._last_invocation
        self._last_invocation = invocation

        if interval > self._reset_time:
            self._exp = 0

        self._exp = min(self._exp + 1, self._max)
        return self._randfunc(0, self._base * 2 ** self._exp) 
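The docstring above describes the general pattern: record the time of the most recent attempt with time.monotonic(), reset the exponent after a long quiet period, and otherwise grow the randomized delay cap up to a maximum. As a rough illustration of how such a delay is typically consumed, here is a hypothetical, self-contained retry loop built on the same idea; it is not part of discord.py, and retry_with_backoff and its parameters are made up for this sketch:

import random
import time

def retry_with_backoff(operation, base=1.0, max_exp=10, attempts=5):
    """Retry `operation` with exponential backoff timed via time.monotonic()."""
    exp = 0
    reset_after = base * 2 ** 11       # a long quiet period resets the exponent
    last_attempt = time.monotonic()
    for _ in range(attempts):
        try:
            return operation()
        except Exception:
            now = time.monotonic()
            if now - last_attempt > reset_after:
                exp = 0                # it has been quiet for a while: start over
            last_attempt = now
            exp = min(exp + 1, max_exp)
            time.sleep(random.uniform(0, base * 2 ** exp))
    raise RuntimeError("operation kept failing after all retries")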
Example #12
Source File: kademlia.py    From pyquarkchain with MIT License
def add(self, node: Node) -> Node:
        """Try to add the given node to this bucket.

        If the node is already present, it is moved to the tail of the list, and we return None.

        If the node is not already present and the bucket has fewer than k entries, it is inserted
        at the tail of the list, and we return None.

        If the bucket is full, we add the node to the bucket's replacement cache and return the
        node at the head of the list (i.e. the least recently seen), which should be evicted if it
        fails to respond to a ping.
        """
        self.last_updated = time.monotonic()
        if node in self.nodes:
            self.nodes.remove(node)
            self.nodes.append(node)
        elif len(self) < self.k:
            self.nodes.append(node)
        else:
            self.replacement_cache.append(node)
            return self.head
        return None 
Example #13
Source File: heartbeat_publisher.py    From pyuavcan with MIT License
async def _task_function(self) -> None:
        next_heartbeat_at = time.monotonic()
        while self._maybe_task:
            try:
                self._call_pre_heartbeat_handlers()
                if self._presentation.transport.local_node_id is not None:
                    if not await self._publisher.publish(self.make_message()):
                        _logger.warning('%s heartbeat send timed out', self)

                next_heartbeat_at += self._publisher.send_timeout
                await asyncio.sleep(next_heartbeat_at - time.monotonic())
            except asyncio.CancelledError:
                _logger.debug('%s publisher task cancelled', self)
                break
            except pyuavcan.transport.ResourceClosedError as ex:
                _logger.debug('%s transport closed, publisher task will exit: %s', self, ex)
                break
            except Exception as ex:
                _logger.exception('%s publisher task exception: %s', self, ex)
        try:
            self._publisher.close()
        except pyuavcan.transport.TransportError:
            pass 
Example #14
Source File: _generate.py    From torf with GNU General Public License v3.0
def __call__(self, cb_args, force_call=False):
        now = time_monotonic()
        prev_call_time = self._prev_call_time
        # _debug(f'CancelCallback: force_call={force_call}, prev_call_time={prev_call_time}, '
        #        f'now={now}, self._interval={self._interval}: {cb_args[1:]}')
        if (force_call or                             # Special case (e.g. exception in Torrent.verify())
            prev_call_time is None or                 # This is the first call
            now - prev_call_time >= self._interval):  # Previous call was at least `interval` seconds ago
            self._prev_call_time = now
            try:
                _debug(f'CancelCallback: Calling callback with {cb_args[1:]}')
                return_value = self._callback(*cb_args)
                if return_value is not None:
                    _debug(f'CancelCallback: Callback cancelled: {return_value!r}')
                    self._cancelled()
                    return True
                return False
            except BaseException as e:
                _debug(f'CancelCallback: Caught exception: {e!r}')
                self._cancelled()
                raise 
Example #15
Source File: test_apex_agent_long_task_learning.py    From rlgraph with Apache License 2.0
def test_worker_init(self):
        """
        Tests if workers initialize without problems for the pong config.
        """
        agent_config = config_from_path("configs/ray_apex_for_pong.json")

        # Long initialization times can lead to Ray crashes.
        start = time.monotonic()
        executor = ApexExecutor(
            environment_spec=self.env_spec,
            agent_config=agent_config,
        )
        end = time.monotonic() - start
        print("Initialized {} workers in {} s.".format(
            executor.num_sample_workers, end
        ))
        executor.test_worker_init() 
Example #16
Source File: kademlia.py    From pyquarkchain with MIT License
def idle_buckets(self) -> List[KBucket]:
        idle_cutoff_time = time.monotonic() - k_idle_bucket_refresh_interval
        return [b for b in self.buckets if b.last_updated < idle_cutoff_time] 
Example #17
Source File: more.py    From python-netsurv with MIT License
def time_limited(limit_seconds, iterable):
    """
    Yield items from *iterable* until *limit_seconds* have passed.

    >>> from time import sleep
    >>> def generator():
    ...     yield 1
    ...     yield 2
    ...     sleep(0.2)
    ...     yield 3
    >>> iterable = generator()
    >>> list(time_limited(0.1, iterable))
    [1, 2]

    Note that the time is checked before each item is yielded, and iteration
    stops if  the time elapsed is greater than *limit_seconds*. If your time
    limit is 1 second, but it takes 2 seconds to generate the first item from
    the iterable, the function will run for 2 seconds and not yield anything.

    """
    if limit_seconds < 0:
        raise ValueError('limit_seconds must be positive')

    start_time = monotonic()
    for item in iterable:
        if monotonic() - start_time > limit_seconds:
            break
        yield item 
Example #18
Source File: misc.py    From RPGBot with GNU General Public License v3.0
def ping(self, ctx):
        """Test the bot's connection ping"""
        a = monotonic()
        await (await ctx.bot.shards[getattr(ctx.guild, "shard_id", 0)].ws.ping())
        b = monotonic()
        ping = "{:.3f}ms".format((b - a) * 1000)
        msg = f"P{choice('aeiou')}ng {ping}"
        await ctx.send(msg) 
Example #19
Source File: more.py    From python-netsurv with MIT License
def time_limited(limit_seconds, iterable):
    """
    Yield items from *iterable* until *limit_seconds* have passed.

    >>> from time import sleep
    >>> def generator():
    ...     yield 1
    ...     yield 2
    ...     sleep(0.2)
    ...     yield 3
    >>> iterable = generator()
    >>> list(time_limited(0.1, iterable))
    [1, 2]

    Note that the time is checked before each item is yielded, and iteration
    stops if  the time elapsed is greater than *limit_seconds*. If your time
    limit is 1 second, but it takes 2 seconds to generate the first item from
    the iterable, the function will run for 2 seconds and not yield anything.

    """
    if limit_seconds < 0:
        raise ValueError('limit_seconds must be positive')

    start_time = monotonic()
    for item in iterable:
        if monotonic() - start_time > limit_seconds:
            break
        yield item 
Example #20
Source File: runexecutor.py    From benchexec with Apache License 2.0
def __init__(
        self,
        cgroups,
        hardtimelimit,
        softtimelimit,
        walltimelimit,
        pid_to_kill,
        cores,
        callbackFn=lambda reason: None,
    ):
        super(_TimelimitThread, self).__init__()
        self.name = "TimelimitThread-" + self.name

        if hardtimelimit or softtimelimit:
            assert CPUACCT in cgroups
        assert walltimelimit is not None

        if cores:
            self.cpuCount = len(cores)
        else:
            try:
                self.cpuCount = multiprocessing.cpu_count()
            except NotImplementedError:
                self.cpuCount = 1

        self.cgroups = cgroups
        # set timelimits to large dummy value if no limit is given
        self.timelimit = hardtimelimit or (60 * 60 * 24 * 365 * 100)
        self.softtimelimit = softtimelimit or (60 * 60 * 24 * 365 * 100)
        self.latestKillTime = time.monotonic() + walltimelimit
        self.pid_to_kill = pid_to_kill
        self.callback = callbackFn
        self.finished = threading.Event() 
Example #21
Source File: test_python_memory_performance.py    From rlgraph with Apache License 2.0
def test_rlgraph_sampling(self):
        """
        Tests RLgraph's sampling performance.
        """
        memory = ApexMemory(
            capacity=self.capacity,
            alpha=1.0
        )

        records = [self.record_space.sample(size=1) for _ in range_(self.inserts)]
        for record in records:
            memory.insert_records((
                 ray_compress(record['states']),
                 record['actions'],
                 record['reward'],
                 record['terminals'],
                 None
            ))
        start = time.monotonic()
        for _ in range_(self.samples):
            batch_tuple = memory.get_records(self.sample_batch_size)
        end = time.monotonic() - start
        tp = self.samples / end
        print('#### Testing RLGraph Prioritized Replay memory ####')
        print('Testing sampling performance:')
        print('Sampled {} batches, throughput: {} batches/s, total time: {} s'.format(
            self.samples, tp, end
        )) 
Example #22
Source File: core.py    From avrae with GNU General Public License v3.0
def about(self, ctx):
        """Information about the bot."""
        stats = {}
        statKeys = ("dice_rolled_life", "spells_looked_up_life", "monsters_looked_up_life", "commands_used_life",
                    "items_looked_up_life", "rounds_init_tracked_life", "turns_init_tracked_life")
        for k in statKeys:
            stats[k] = await Stats.get_statistic(ctx, k)

        embed = discord.Embed(description='Avrae, a bot to streamline D&D 5e online.\n'
                                          'Check out the latest release notes '
                                          '[here](https://github.com/avrae/avrae/releases/latest).')
        embed.title = "Invite Avrae to your server!"
        embed.url = "https://invite.avrae.io"
        embed.colour = 0x7289da
        total_members = sum(1 for _ in self.bot.get_all_members())
        unique_members = len(self.bot.users)
        members = '%s total\n%s unique' % (total_members, unique_members)
        embed.add_field(name='Members (Cluster)', value=members)
        embed.add_field(name='Uptime', value=str(timedelta(seconds=round(time.monotonic() - self.start_time))))
        motd = random.choice(["May the RNG be with you", "May your rolls be high",
                              "Will give higher rolls for cookies", ">:3",
                              "Does anyone even read these?"])
        embed.set_footer(
            text=f'{motd} | Build {await self.bot.rdb.get("build_num")} | Cluster {self.bot.cluster_id}')

        commands_run = "{commands_used_life} total\n{dice_rolled_life} dice rolled\n" \
                       "{spells_looked_up_life} spells looked up\n{monsters_looked_up_life} monsters looked up\n" \
                       "{items_looked_up_life} items looked up\n" \
                       "{rounds_init_tracked_life} rounds of initiative tracked ({turns_init_tracked_life} turns)" \
            .format(**stats)
        embed.add_field(name="Commands Run", value=commands_run)
        embed.add_field(name="Servers", value=f"{len(self.bot.guilds)} on this cluster\n"
                                              f"{await Stats.get_guild_count(self.bot)} total")
        memory_usage = psutil.Process().memory_full_info().uss / 1024 ** 2
        embed.add_field(name='Memory Usage', value='{:.2f} MiB'.format(memory_usage))
        embed.add_field(name='About', value='Made with :heart: by zhu.exe#4211 and the D&D Beyond team\n'
                                            'Join the official development server [here](https://discord.gg/pQbd4s6)!',
                        inline=False)

        await ctx.send(embed=embed) 
Example #23
Source File: core.py    From avrae with GNU General Public License v3.0
def __init__(self, bot):
        self.bot = bot
        self.start_time = time.monotonic() 
Example #24
Source File: stats.py    From avrae with GNU General Public License v3.0
def __init__(self, bot):
        """
        :type bot: :class:`dbot.Avrae`
        """
        self.bot = bot
        self.start_time = time.monotonic()
        self.command_stats = Counter()
        self.bot.loop.create_task(self.scheduled_update())

    # ===== listeners ===== 
Example #25
Source File: subprocess.py    From jawfish with MIT License
def _check_timeout(self, endtime, orig_timeout):
        """Convenience for checking if a timeout has expired."""
        if endtime is None:
            return
        if _time() > endtime:
            raise TimeoutExpired(self.args, orig_timeout) 
Example #26
Source File: backoff.py    From discord.py with MIT License
def __init__(self, base=1, *, integral=False):
        self._base = base

        self._exp = 0
        self._max = 10
        self._reset_time = base * 2 ** 11
        self._last_invocation = time.monotonic()

        # Use our own random instance to avoid messing with global one
        rand = random.Random()
        rand.seed()

        self._randfunc = rand.randrange if integral else rand.uniform 
Example #27
Source File: subprocess.py    From jawfish with MIT License
def wait(self, timeout=None, endtime=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is not None:
                return self.returncode

            # endtime is preferred to timeout.  timeout is only used for
            # printing.
            if endtime is not None or timeout is not None:
                if endtime is None:
                    endtime = _time() + timeout
                elif timeout is None:
                    timeout = self._remaining_time(endtime)

            if endtime is not None:
                # Enter a busy loop if we have a timeout.  This busy loop was
                # cribbed from Lib/threading.py in Thread.wait() at r71065.
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                while True:
                    (pid, sts) = self._try_wait(os.WNOHANG)
                    assert pid == self.pid or pid == 0
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                        break
                    remaining = self._remaining_time(endtime)
                    if remaining <= 0:
                        raise TimeoutExpired(self.args, timeout)
                    delay = min(delay * 2, remaining, .05)
                    time.sleep(delay)
            else:
                while self.returncode is None:
                    (pid, sts) = self._try_wait(0)
                    # Check the pid and loop as waitpid has been known to return
                    # 0 even without WNOHANG in odd situations.  issue14396.
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
            return self.returncode 
Example #28
Source File: queue.py    From jawfish with MIT License
def put(self, item, block=True, timeout=None):
        '''Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        '''
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() >= self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    endtime = time() + timeout
                    while self._qsize() >= self.maxsize:
                        remaining = endtime - time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify() 
Example #29
Source File: test_python_memory_performance.py    From rlgraph with Apache License 2.0
def test_ray_combined_ops(self):
        """
        Tests a combined workflow of insert, sample, update on the prioritized replay memory.
        """
        assert get_distributed_backend() == "ray"
        memory = PrioritizedReplayBuffer(
            size=self.capacity,
            alpha=1.0,
            clip_rewards=True
        )
        chunksize = 32

        # Test chunked inserts -> done via external for loop in Ray.
        chunks = int(self.inserts / chunksize)
        records = [self.record_space.sample(size=chunksize) for _ in range_(chunks)]
        loss_values = [np.random.random(size=self.sample_batch_size) for _ in range_(chunks)]
        start = time.monotonic()

        for chunk, loss_values in zip(records, loss_values):
            # Insert.
            for i in range_(chunksize):
                memory.add(
                    obs_t=ray_compress(chunk['states'][i]),
                    action=chunk['actions'][i],
                    reward=chunk['reward'][i],
                    obs_tp1=ray_compress(chunk['states'][i]),
                    done=chunk['terminals'][i],
                    weight=None
                )
            # Sample.
            batch_tuple = memory.sample(self.sample_batch_size, beta=1.0)
            indices = batch_tuple[-1]
            # Update
            memory.update_priorities(indices, loss_values)

        end = time.monotonic() - start
        tp = len(records) / end
        print('Ray: testing combined insert/sample/update performance:')
        print('Ran {} combined ops, throughput: {} combined ops/s, total time: {} s'.format(
            len(records), tp, end
        )) 
Example #30
Source File: test_python_memory_performance.py    From rlgraph with Apache License 2.0
def test_ray_sampling(self):
        """
        Tests Ray's memory performance.
        """
        assert get_distributed_backend() == "ray"
        memory = PrioritizedReplayBuffer(
            size=self.capacity,
            alpha=1.0,
            clip_rewards=True
        )
        records = [self.record_space.sample(size=1) for _ in range_(self.inserts)]
        for record in records:
            memory.add(
                obs_t=ray_compress(record['states']),
                action=record['actions'],
                reward=record['reward'],
                obs_tp1=ray_compress(record['states']),
                done=record['terminals'],
                weight=None
            )
        start = time.monotonic()
        for _ in range_(self.samples):
            batch_tuple = memory.sample(self.sample_batch_size, beta=1.0)
        end = time.monotonic() - start
        tp = self.samples / end
        print('#### Testing Ray Prioritized Replay memory ####')
        print('Testing sampling performance:')
        print('Sampled {} batches, throughput: {} samples/s, total time: {} s'.format(
            self.samples, tp, end
        ))