Python time.perf_counter() Examples

The following are 30 code examples of time.perf_counter(), gathered from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the time module.
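Before the project examples, here is a minimal sketch of the basic pattern: time.perf_counter() returns a high-resolution, monotonic clock reading whose absolute value has no defined meaning, so only the difference between two calls is useful.

import time

start = time.perf_counter()
total = sum(i * i for i in range(1_000_000))  # stand-in workload being timed
elapsed = time.perf_counter() - start
print(f"workload took {elapsed:.4f}s")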
Example #1
Source File: train.py    From pytorch_geometric with MIT License
def train_runtime(model, data, epochs, device):
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    model = model.to(device)
    data = data.to(device)
    model.train()
    mask = data.train_mask if 'train_mask' in data else data.train_idx
    y = data.y[mask] if 'train_mask' in data else data.train_y

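    # Flush any pending GPU work so the timer starts from an idle device.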
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t_start = time.perf_counter()

    for epoch in range(epochs):
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out[mask], y)
        loss.backward()
        optimizer.step()

    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t_end = time.perf_counter()

    return t_end - t_start 
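A hypothetical call of the helper above (model, data and the epoch count are placeholders, not taken from pytorch_geometric):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
elapsed = train_runtime(model, data, epochs=200, device=device)
print('200 epochs took {:.2f}s'.format(elapsed))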
Example #2
Source File: db.py    From query-exporter with GNU General Public License v3.0
def _setup_query_latency_tracking(self):
        engine = self._engine.sync_engine

        @event.listens_for(engine, "before_cursor_execute")
        def before_cursor_execute(
            conn, cursor, statement, parameters, context, executemany
        ):
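            # Stash the start time on the connection; the after-execute hook pops it back off.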
            conn.info["query_start_time"] = perf_counter()

        @event.listens_for(engine, "after_cursor_execute")
        def after_cursor_execute(
            conn, cursor, statement, parameters, context, executemany
        ):
            conn.info["query_latency"] = perf_counter() - conn.info.pop(
                "query_start_time"
            ) 
Example #3
Source File: geometry_test.py    From AerialDetection with Apache License 2.0
def setUp(self):
        self.num_bboxes1 = 200
        self.num_bboxes2 = 20000
        self.image_width = 1024
        self.image_height = 1024

        self.bboxes1 = np.zeros([self.num_bboxes1, 4])
        self.bboxes1[:, [0, 2]] = np.sort(np.random.rand(self.num_bboxes1, 2) * (self.image_width - 1))
        self.bboxes1[:, [1, 3]] = np.sort(np.random.rand(self.num_bboxes1, 2) * (self.image_height - 1))

        self.bboxes2 = np.zeros([self.num_bboxes2, 4])
        self.bboxes2[:, [0, 2]] = np.sort(np.random.rand(self.num_bboxes2, 2) * (self.image_width - 1))
        self.bboxes2[:, [1, 3]] = np.sort(np.random.rand(self.num_bboxes2, 2) * (self.image_height - 1))

        self.bboxes1_tensor = torch.from_numpy(self.bboxes1)
        self.bboxes2_tensor = torch.from_numpy(self.bboxes2)

        start = time.perf_counter()
        self.ious = bbox_overlaps(self.bboxes1_tensor, self.bboxes2_tensor).numpy()
        elapsed = (time.perf_counter() - start)
        print('bbox_overlaps time: ', elapsed) 
Example #4
Source File: agents.py    From pyDcop with BSD 3-Clause "New" or "Revised" License
def metrics(self):
        if self._run_t is None:
            activity_ratio = 0
        else:
            total_t = perf_counter() - self._run_t
            activity_ratio = self.t_active / (total_t)
        own_computations = { c.name for c in self.computations(include_technical=True)}
        m = {
            'count_ext_msg': {k: v
                              for k, v in self._messaging.count_ext_msg.items()
                              if k in own_computations},
            'size_ext_msg': {k: v
                             for k, v in self._messaging.size_ext_msg.items()
                             if k in own_computations},
            # 'last_msg_time': self._messaging.last_msg_time,
            'activity_ratio': activity_ratio,
            'cycles': {c.name: c.cycle_count for c in self.computations()}
        }
        return m 
Example #5
Source File: validation.py    From stdpopsim with GNU General Public License v3.0
def _onepop_expgrowth(
        engine_id, out_dir, seed, N0=5000, N1=500, T=1000, **sim_kwargs):
    growth_rate = - np.log(N1 / N0) / T
    species = stdpopsim.get_species("DroMel")
    contig = species.get_contig("chr2R", length_multiplier=0.01)  # ~250 kb
    contig = irradiate(contig)
    model = _PiecewiseSize(N0, growth_rate, (T, N1, 0))
    model.generation_time = species.generation_time
    samples = model.get_samples(100)
    engine = stdpopsim.get_engine(engine_id)
    t0 = time.perf_counter()
    ts = engine.simulate(model, contig, samples, seed=seed, **sim_kwargs)
    t1 = time.perf_counter()
    out_file = out_dir / f"{seed}.trees"
    ts.dump(out_file)
    return out_file, t1 - t0 
Example #6
Source File: validation.py    From stdpopsim with GNU General Public License v3.0
def _twopop_IM(
        engine_id, out_dir, seed,
        NA=1000, N1=500, N2=5000, T=1000, M12=0, M21=0, pulse=None, samples=None,
        **sim_kwargs):
    species = stdpopsim.get_species("AraTha")
    contig = species.get_contig("chr5", length_multiplier=0.01)  # ~270 kb
    contig = irradiate(contig)
    model = stdpopsim.IsolationWithMigration(
            NA=NA, N1=N1, N2=N2, T=T, M12=M12, M21=M21)
    if pulse is not None:
        model.demographic_events.append(pulse)
        model.demographic_events.sort(key=lambda x: x.time)
    # XXX: AraTha has species.generation_time == 1, but there is the potential
    # for this to mask bugs related to generation_time scaling, so we use 3 here.
    model.generation_time = 3
    if samples is None:
        samples = model.get_samples(50, 50, 0)
    engine = stdpopsim.get_engine(engine_id)
    t0 = time.perf_counter()
    ts = engine.simulate(model, contig, samples, seed=seed, **sim_kwargs)
    t1 = time.perf_counter()
    out_file = out_dir / f"{seed}.trees"
    ts.dump(out_file)
    return out_file, t1 - t0 
Example #7
Source File: bench_grad_alpha.py    From entmax with MIT License
def bench(f_):
    timings_fwd = []
    timings_bck = []
    for _ in range(100):

        with f_ as f:
            tic = time.perf_counter()
            f.forward()
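            # Block until queued GPU kernels finish so toc reflects the real work.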
            torch.cuda.synchronize()
            toc = time.perf_counter()
            timings_fwd.append(toc - tic)

            tic = time.perf_counter()
            f.backward()
            torch.cuda.synchronize()
            toc = time.perf_counter()
            timings_bck.append(toc - tic)

    return (np.percentile(timings_fwd, [25, 50, 75]),
            np.percentile(timings_bck, [25, 50, 75])) 
Example #8
Source File: gateway.py    From discord.py with MIT License
def __init__(self, *args, **kwargs):
        ws = kwargs.pop('ws', None)
        interval = kwargs.pop('interval', None)
        shard_id = kwargs.pop('shard_id', None)
        threading.Thread.__init__(self, *args, **kwargs)
        self.ws = ws
        self._main_thread_id = ws.thread_id
        self.interval = interval
        self.daemon = True
        self.shard_id = shard_id
        self.msg = 'Keeping websocket alive with sequence %s.'
        self.block_msg = 'Heartbeat blocked for more than %s seconds.'
        self.behind_msg = 'Can\'t keep up, websocket is %.1fs behind.'
        self._stop_ev = threading.Event()
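        # perf_counter() is monotonic, so heartbeat latency is unaffected by system clock changes.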
        self._last_ack = time.perf_counter()
        self._last_send = time.perf_counter()
        self.latency = float('inf')
        self.heartbeat_timeout = ws._max_heartbeat_timeout 
Example #9
Source File: inference.py    From tsinfer with GNU General Public License v3.0
def _run_synchronous(self, progress):
        a = np.zeros(self.num_sites, dtype=np.int8)
        for t, focal_sites in self.descriptors:
            before = time.perf_counter()
            s, e = self.ancestor_builder.make_ancestor(focal_sites, a)
            duration = time.perf_counter() - before
            logger.debug(
                "Made ancestor in {:.2f}s at timepoint {} (epoch {}) "
                "from {} to {} (len={}) with {} focal sites ({})".format(
                    duration,
                    t,
                    self.timepoint_to_epoch[t],
                    s,
                    e,
                    e - s,
                    focal_sites.shape[0],
                    focal_sites,
                )
            )
            self.ancestor_data.add_ancestor(
                start=s, end=e, time=t, focal_sites=focal_sites, haplotype=a[s:e]
            )
            progress.update() 
Example #10
Source File: utility.py    From news-popularity-prediction with Apache License 2.0
def form_graphs(social_context_generator, assessment_timestamp):
    # fp = open("/home/georgerizos/Documents/fetch_times/build_graph_time" + ".txt", "a")
    for social_context_dict in social_context_generator:
        # start_time = time.perf_counter()
        snapshots,\
        targets,\
        title = get_snapshot_graphs(social_context_dict["social_context"],
                                    # social_context_dict["tweet_timestamp"],
                                    assessment_timestamp,
                                    social_context_dict["platform_name"])
        # elapsed_time = time.perf_counter() - start_time
        # fp.write(repr(elapsed_time) + "\n")
        if snapshots is None:
            continue

        if len(snapshots) > 1:
            graph_dict = social_context_dict
            graph_dict["snapshots"] = snapshots
            graph_dict["targets"] = targets
            graph_dict["title"] = title
            yield graph_dict 
Example #11
Source File: train_eval.py    From pytorch_geometric with MIT License
def run(train_dataset, test_dataset, model, epochs, batch_size, lr,
        lr_decay_factor, lr_decay_step_size, weight_decay):

    model = model.to(device)
    optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

    train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size, shuffle=False)

    for epoch in range(1, epochs + 1):
        if torch.cuda.is_available():
            torch.cuda.synchronize()

        t_start = time.perf_counter()

        train(model, optimizer, train_loader, device)
        test_acc = test(model, test_loader, device)

        if torch.cuda.is_available():
            torch.cuda.synchronize()

        t_end = time.perf_counter()

        print('Epoch: {:03d}, Test: {:.4f}, Duration: {:.2f}'.format(
            epoch, test_acc, t_end - t_start))

        if epoch % lr_decay_step_size == 0:
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_decay_factor * param_group['lr'] 
Example #12
Source File: utility.py    From news-popularity-prediction with Apache License 2.0
def youtube_daemon_worker(id, youtube_queue, social_context_queue, youtube_module_communication, youtube_oauth_credentials_folder):
    while True:
        if social_context_queue.qsize() > 50:
            time.sleep(10.0)
        else:
            url_counter, url, upper_timestamp = youtube_queue.get()

            try:
                # start_time = time.perf_counter()
                social_context = youtube_social_context.collect(url, youtube_module_communication + "_" + str(id), youtube_oauth_credentials_folder)
                # elapsed_time = time.perf_counter() - start_time
                # if social_context is not None:
                #     social_context["elapsed_time"] = elapsed_time
            except KeyError:
                social_context = None

            social_context_queue.put((url_counter, social_context))
            youtube_queue.task_done() 
Example #13
Source File: utility.py    From news-popularity-prediction with Apache License 2.0
def reddit_daemon_worker(id, reddit_queue, social_context_queue, reddit_oauth_credentials_path):
    while True:
        if social_context_queue.qsize() > 50:
            time.sleep(10.0)
        else:
            url_counter, url, upper_timestamp = reddit_queue.get()

            try:
                # start_time = time.perf_counter()
                social_context = reddit_social_context.collect(url, reddit_oauth_credentials_path)
                # elapsed_time = time.perf_counter() - start_time
                # if social_context is not None:
                #     social_context["elapsed_time"] = elapsed_time
            except KeyError:
                social_context = None

            social_context_queue.put((url_counter, social_context))
            reddit_queue.task_done() 
Example #14
Source File: orchestrator.py    From pyDcop with BSD 3-Clause "New" or "Revised" License
def _orchestrator_run_computations(self, *_):
        """
        Request all orchestrated agents to start their computations.
        """
        self.logger.info('Request agents to run')
        self.start_time = perf_counter()
        for agt in self.discovery.agents():
            if agt == 'orchestrator':
                continue
            computations = self.initial_dist.computations_hosted(agt)
            if not self._orchestrator.repair_only:
                self._send_mgt_msg(agt, RunAgentMessage(computations))
            self._agts_state[agt] = 'running' 
Example #15
Source File: thief.py    From discord_cogs with GNU General Public License v3.0
def run_death(self, user):
        await self.config.member(user).CrimLevel.set(0)
        await self.config.member(user).OOB.set(False)
        await self.config.member(user).BailC.set(0)
        await self.config.member(user).Sentence.set(0)
        await self.config.member(user).Status.set("Dead")
        await self.config.member(user).TotalDeaths.set(await self.config.member(user).TotalDeaths() + 1)
        await self.config.member(user).JailC.set(0)
        await self.config.member(user).DeathT.set(int(time.perf_counter()))
        if (await self.config.guild(user.guild).Config())["Hardcore"]:
            await self.hardcore_handler(user) 
Example #16
Source File: profiling.py    From tenpy with GNU General Public License v3.0
def perform_profiling(mod_name, repeat=1, seed=0, filename=fn_template, **kwargs):
    """Run profiling of the `benchmark` function in the given module.

    Parameters
    ----------
    mod_name : str
        The name of a module containing the benchmark.
        Must define the functions ``data = setup_benchmark(size, **kwargs)``
        and ``benchmark(data)``; the latter is called repeatedly and is what
        gets profiled.
    repeat : int
        Run the profiled `benchmark` function this many times.
    seed : int
        Seed for the random number generator, to improve reproducibility.
    filename : str
        Template for the filename.
    **kwargs :
        Further arguments given to the `setup_benchmark` function.
        Note: these are formatted to a string with ``repr(kwargs)``, so don't
        use overly complicated arguments!
    """
    kwargs['mod_name'] = mod_name
    filename = filename.format(mod_q_str='_'.join([str(q) for q in kwargs['mod_q']]), **kwargs)
    np.random.seed(seed)
    setup_code = "import {mod_name!s}\ndata = {mod_name!s}.setup_benchmark(**{kwargs!r})"
    setup_code = setup_code.format(mod_name=mod_name, kwargs=kwargs)
    namespace = {}
    exec(setup_code, namespace, namespace)
    timing_code = "{mod_name}.benchmark(data)".format(mod_name=mod_name)
    if repeat > 1:
        timing_code = "for _ in range({repeat:d}): ".format(repeat=repeat) + timing_code
    if sys.version_info > (3, 3):
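        # Use the high-resolution monotonic clock as the profiler's timer function.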
        prof = cProfile.Profile(time.perf_counter)
    else:
        prof = cProfile.Profile()
    prof.runctx(timing_code, namespace, namespace)
    prof.dump_stats(filename)

    #  cProfile.runctx(timing_code, namespace, namespace, filename)
    print("saved profiling to", filename)
    return filename 
Example #17
Source File: mem-latency.py    From py-uio with MIT License
def latency():
        t0 = time.perf_counter()
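        # Reading .value performs the memory access whose latency is being measured.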
        m.value
        t1 = time.perf_counter()
        return t1 - t0 
Example #18
Source File: test_lock.py    From aiozk with MIT License
def test_timeout_accuracy(zk, path):
    lock = zk.recipes.Lock(path)

    async with await lock.acquire():
        lock2 = zk.recipes.Lock(path)
        analyze_siblings = lock2.analyze_siblings
        lock2.analyze_siblings = asynctest.CoroutineMock()

        async def slow_analyze():
            await asyncio.sleep(0.5)
            return await analyze_siblings()

        lock2.analyze_siblings.side_effect = slow_analyze

        acquired = False
        start = time.perf_counter()
        with pytest.raises(TimeoutError):
            async with await lock2.acquire(timeout=0.5):
                acquired = True

        elapsed = time.perf_counter() - start

    await zk.deleteall(path)

    assert not acquired
    assert elapsed < 1 
Example #19
Source File: agents.py    From pyDcop with BSD 3-Clause "New" or "Revised" License
def set_periodic_action(self, period: float, cb: Callable):
        """
        Set a periodic action.

        The callback `cb` will be called every `period` seconds. The delay
        is not strict: the handling of a message is never interrupted, so if
        it takes longer than `period`, the callback is delayed and will only
        be called once the task has finished.

        Parameters
        ----------
        period: float
            the period, in seconds
        cb: Callable
            a callback taking no arguments

        Returns
        -------
        handle:
            A handle that can be used to remove the periodic action.
            This handle is actually the callback object itself.

        """
        assert period is not None
        assert cb is not None
        self.logger.debug("Add periodic action %s - %s ", period, cb)
        self._periodic_cb[cb] = (period, perf_counter())
        return cb 
Example #20
Source File: agents.py    From pyDcop with BSD 3-Clause "New" or "Revised" License
def _process_periodic_action(self):
        # Process periodic actions, but only once the agent is running its
        # computations (i.e. self._start_t is not None).
        ct = perf_counter()
        if self._start_t is not None :
            for cb, (p, last_t) in list(self._periodic_cb.items()):
                if ct - last_t >= p:
                    # self.logger.debug('periodic cb %s, %s %s ', cb, ct, last_t)
                    # Must update the cb entry BEFORE calling the cb, in case
                    # the cb attempts to modify (e.g. remove) its own entry by
                    # calling remove_periodic_action
                    self._periodic_cb[cb] = (p, ct)
                    cb() 
Example #21
Source File: smatch-table.py    From smatch with MIT License
def main(arguments):
    global verbose
    (ids, names, result) = check_args(arguments)
    if arguments.v:
        verbose = True
    if not result:
        return 0
    acc_time = 0
    len_name = len(names)
    table = []
    for i in range(0, len_name + 1):
        table.append([])
    table[0].append("")
    for i in range(0, len_name):
        table[0].append(names[i])
    for i in range(0, len_name):
        table[i+1].append(names[i])
        for j in range(0, len_name):
            if i != j:
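                # Time each pairwise comparison; only successful runs count toward acc_time.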
                start = time.perf_counter()
                table[i+1].append(compute_files(names[i], names[j], ids, arguments.fd, arguments.r))
                end = time.perf_counter()
                if table[i+1][-1] != -1.0:
                    acc_time += end-start
            else:
                table[i+1].append("")
    # check table
    for i in range(0, len_name + 1):
        for j in range(0, len_name + 1):
            if i != j:
                if table[i][j] != table[j][i]:
                    if table[i][j] > table[j][i]:
                        table[j][i] = table[i][j]
                    else:
                        table[i][j] = table[j][i]
    pprint_table(table)
    return acc_time 
Example #22
Source File: utility.py    From news-popularity-prediction with Apache License 2.0
def extract_features(graph_generator, assessment_timestamp):
    # fp = open("/home/georgerizos/Documents/fetch_times/extract_features_time" + ".txt", "a")
    for graph_snapshot_dict in graph_generator:
        # start_time = time.perf_counter()
        snapshots = graph_snapshot_dict["snapshots"]

        initial_post = graph_snapshot_dict["social_context"]["initial_post"]
        author = graph_snapshot_dict["social_context"]["author"]
        platform = graph_snapshot_dict["platform_name"]

        snapshots_with_features = list()
        tweet_timestamp = graph_snapshot_dict["tweet_timestamp"]
        for snapshot_dict in snapshots:
            comment_tree = snapshot_dict["comment_tree"]
            user_graph = snapshot_dict["user_graph"]
            timestamp_list = snapshot_dict["timestamp_list"]

            features = extract_snapshot_features(comment_tree,
                                                 user_graph,
                                                 timestamp_list,
                                                 assessment_timestamp,
                                                 # tweet_timestamp,
                                                 initial_post,
                                                 author,
                                                 platform)
            snapshot_dict["features"] = features

            snapshots_with_features.append(snapshot_dict)

        features_dict = graph_snapshot_dict
        features_dict["snapshots"] = snapshots_with_features

        # elapsed_time = time.perf_counter() - start_time
        # fp.write(repr(elapsed_time) + "\n")

        yield features_dict 
Example #23
Source File: warp_dataset.py    From DepthNets with MIT License
def time(self):
        return time.perf_counter() - self._start_time 
Example #24
Source File: warp_dataset.py    From DepthNets with MIT License
def restart(self):
        self._start_time = time.perf_counter() 
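Examples #23 and #24 are two methods of the same small timer helper. A self-contained sketch of such a class (the Timer name and __init__ are assumptions; the source shows only the two methods) could be:

import time

class Timer:  # hypothetical name; only restart() and time() appear in the source
    def __init__(self):
        self._start_time = time.perf_counter()

    def restart(self):
        self._start_time = time.perf_counter()

    def time(self):
        return time.perf_counter() - self._start_time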
Example #25
Source File: bfu_WriteText.py    From Blender-For-UnrealEngine-Addons with GNU General Public License v3.0
def ExportSingleConfigParser(config, dirpath, filename):
	#Export single ConfigParser

	filename = ValidFilename(filename)
	curr_time = time.perf_counter()

	absdirpath = bpy.path.abspath(dirpath)
	VerifiDirs(absdirpath)
	fullpath = os.path.join( absdirpath , filename )

	with open(fullpath, "w") as configfile:
		config.write(configfile)

	exportTime = time.perf_counter()-curr_time
	return([filename,"TextFile",absdirpath,exportTime]) #[AssetName , AssetType , ExportPath, ExportTime] 
Example #26
Source File: bfu_WriteText.py    From Blender-For-UnrealEngine-Addons with GNU General Public License v3.0
def ExportSingleText(text, dirpath, filename):
	#Export single text

	filename = ValidFilename(filename)
	curr_time = time.perf_counter()

	absdirpath = bpy.path.abspath(dirpath)
	VerifiDirs(absdirpath)
	fullpath = os.path.join( absdirpath , filename )

	with open(fullpath, "w") as file:
		file.write(text)

	exportTime = time.perf_counter()-curr_time
	return([filename,"TextFile",absdirpath,exportTime]) #[AssetName , AssetType , ExportPath, ExportTime] 
Example #27
Source File: bfu_ExportAssetsByType.py    From Blender-For-UnrealEngine-Addons with GNU General Public License v3.0
def ExportSingleAlembicAnimation(originalScene, dirpath, filename, obj):
	'''
	#####################################################
			#ALEMBIC ANIMATION
	#####################################################
	'''
	#Export a single alembic animation

	scene = bpy.context.scene
	filename = ValidFilenameForUnreal(filename)
	curr_time = time.perf_counter()
	if bpy.ops.object.mode_set.poll():
		bpy.ops.object.mode_set(mode = 'OBJECT')

	SelectParentAndDesiredChilds(obj)

	scene.frame_start += obj.StartFramesOffset
	scene.frame_end += obj.EndFramesOffset
	absdirpath = bpy.path.abspath(dirpath)
	VerifiDirs(absdirpath)
	fullpath = os.path.join( absdirpath , filename )

	##Export
	bpy.ops.wm.alembic_export(
		filepath=fullpath,
		check_existing=False,
		selected=True,
		triangulate=False,
		)

	scene.frame_start -= obj.StartFramesOffset
	scene.frame_end -= obj.EndFramesOffset
	exportTime = time.perf_counter()-curr_time

	MyAsset = originalScene.UnrealExportedAssetsList.add()
	MyAsset.assetName = filename
	MyAsset.assetType = "Alembic"
	MyAsset.exportPath = absdirpath
	MyAsset.exportTime = exportTime
	MyAsset.object = obj
	return MyAsset 
Example #28
Source File: run.py    From Penny-Dreadful-Tools with GNU General Public License v3.0
def run_all_tasks(module: Any, with_flag: Optional[str] = None) -> None:
    error = None
    app_context = None
    m = importlib.import_module('{module}'.format(module=module))
    # pylint: disable=unused-variable
    for importer, modname, ispkg in pkgutil.iter_modules(m.__path__): # type: ignore
        try:
            s = importlib.import_module('{module}.{name}'.format(name=modname, module=module))
            use_app_context = getattr(s, 'REQUIRES_APP_CONTEXT', True)
            if use_app_context and app_context is None:
                from decksite import APP
                APP.config['SERVER_NAME'] = configuration.server_name()
                app_context = APP.app_context() # type: ignore
                app_context.__enter__() # type: ignore

            if with_flag and not getattr(s, with_flag, False):
                continue
            if getattr(s, 'scrape', None) is not None:
                timer = time.perf_counter()
                s.scrape() # type: ignore
                t = time.perf_counter() - timer
                print(f'{s.__name__} completed in {t}')

            elif getattr(s, 'run', None) is not None:
                timer = time.perf_counter()
                s.run() # type: ignore
                t = time.perf_counter() - timer
                print(f'{s.__name__} completed in {t}')
        except Exception as c: # pylint: disable=broad-except
            from shared import repo
            repo.create_issue(f'Error running task {s.__name__}', 'CLI', 'CLI', 'PennyDreadfulMTG/perf-reports', exception=c)
            error = c

    if app_context is not None:
        app_context.__exit__(None, None, None)
    if error:
        raise error 
Example #29
Source File: perf.py    From Penny-Dreadful-Tools with GNU General Public License v3.0
def took(start_time: float) -> float:
    return time.perf_counter() - start_time 
Example #30
Source File: perf.py    From Penny-Dreadful-Tools with GNU General Public License v3.0
def test(f: Callable, limit: float) -> None:
    begin = time.perf_counter()
    f()
    duration = time.perf_counter() - begin
    print(duration)
    assert duration <= limit