Python time.process_time() Examples

The following are code examples showing how to use time.process_time(), extracted from open source Python projects.
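Before diving into the project examples, here is a minimal, self-contained sketch of what time.process_time() measures: the CPU time of the current process, which excludes sleep, in contrast to the wall-clock time.perf_counter().

import time

t_wall = time.perf_counter()
t_cpu = time.process_time()

time.sleep(1)                        # counts toward wall time only
sum(i * i for i in range(10**6))     # busy work counts toward both clocks

print("wall clock: %.3f s" % (time.perf_counter() - t_wall))  # about 1 s plus the busy work
print("CPU time:   %.3f s" % (time.process_time() - t_cpu))   # only the busy work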

Example 1
Project: fg21sim   Author: liweitianux   File: foregrounds.py
def simulate_component(self, compID):
        """
        Do simulation for the specified foreground component.
        """
        logger.info("==================================================")
        logger.info(">>> Simulate component: %s <<<" % compID)
        logger.info("==================================================")
        t1_start = time.perf_counter()
        t2_start = time.process_time()

        comp_cls = COMPONENTS_ALL[compID]
        comp_obj = comp_cls(self.configs)
        comp_obj.preprocess()
        skyfiles = comp_obj.simulate()
        if self.products:
            self.products.add_component(compID, skyfiles)
        comp_obj.postprocess()

        t1_stop = time.perf_counter()
        t2_stop = time.process_time()
        logger.info("--------------------------------------------------")
        logger.info("Elapsed time: %.1f [min]" % ((t1_stop-t1_start)/60))
        logger.info("CPU process time: %.1f [min]" % ((t2_stop-t2_start)/60))
        logger.info("--------------------------------------------------") 
Example 2
Project: zmirror   Author: aploium   File: test_regex.py    (MIT License)
def performance_test__regex_basic_mirrorlization(self):
        """? regex_basic_mirrorlization ??????"""
        from more_configs.config_google_and_zhwikipedia import target_domain, external_domains
        self.reload_zmirror(configs_dict=dict(
            target_domain=target_domain,
            external_domains=external_domains,
        ))
        from time import process_time
        reg_func = self.zmirror.response_text_basic_mirrorlization
        print(self.zmirror.regex_basic_mirrorlization.pattern)

        with open(zmirror_file("tests/sample/google_home.html"), "r", encoding="utf-8") as fp:
            text = fp.read()

        start_time = process_time()
        for _ in range(1000):
            reg_func(text)
        print("100x google_home.html", process_time() - start_time) 
Example 3
Project: kripodb   Author: 3D-e-Chem   File: frozen.py
def _ingest_pairs(self, pairs, oid2nid, frame_size, limit, single_sided):
        oid2nid_v = np.vectorize(oid2nid.get)
        # whole pairs set does not fit in memory, so split it in frames with `frame_size` number of pairs.
        for start in range(0, limit, frame_size):
            stop = frame_size + start
            t1 = process_time()
            six.print_('Fetching pairs {0}:{1} of {2} ... '.format(start, stop, limit), end='', flush=True)
            raw_frame = pairs.read(start=start, stop=stop)
            t2 = process_time()
            six.print_('{0}s, Parsing ... '.format(int(t2 - t1)), flush=True)
            frame = self._translate_frame(raw_frame, oid2nid_v, single_sided)
            t3 = process_time()
            six.print_('Writing ... '.format(int(t3 - t2)), flush=True)
            # alternate direction, to make use of cached chunks of prev frame
            self._ingest_pairs_frame(frame)
            del frame
            t4 = process_time()
            six.print_('{0}s, Done with {1}:{2} in {3}s'.format(int(t4 - t3), start, stop, int(t4 - t1)), flush=True) 
Example 4
Project: fg21sim   Author: liweitianux   File: console.py
def _task_test(self, **kwargs):
        """
        Test task ...
        """
        import time
        t1_start = time.perf_counter()
        t2_start = time.process_time()
        logger.info("Console TEST task: START ...")
        for i in range(kwargs["time"]):
            logger.info("Console TEST task: slept {0} seconds ...".format(i))
            time.sleep(1)
        logger.info("Console TEST task: DONE!")
        t1_stop = time.perf_counter()
        t2_stop = time.process_time()
        logger.info("Elapsed time: {0:.3f} (s)".format(t1_stop - t1_start))
        logger.info("CPU process time: {0:.3f} (s)".format(t2_stop - t2_start))
        return (True, None) 
Example 5
Project: web_ctp   Author: molebot   File: test_time.py
def test_get_clock_info(self):
        clocks = ['clock', 'perf_counter', 'process_time', 'time']
        if hasattr(time, 'monotonic'):
            clocks.append('monotonic')

        for name in clocks:
            info = time.get_clock_info(name)
            #self.assertIsInstance(info, dict)
            self.assertIsInstance(info.implementation, str)
            self.assertNotEqual(info.implementation, '')
            self.assertIsInstance(info.monotonic, bool)
            self.assertIsInstance(info.resolution, float)
            # 0.0 < resolution <= 1.0
            self.assertGreater(info.resolution, 0.0)
            self.assertLessEqual(info.resolution, 1.0)
            self.assertIsInstance(info.adjustable, bool)

        self.assertRaises(ValueError, time.get_clock_info, 'xxx') 
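For reference, time.get_clock_info() returns a namespace whose fields match the assertions above; the exact values vary by platform:

import time

info = time.get_clock_info('process_time')
print(info.implementation)  # e.g. 'clock_gettime(CLOCK_PROCESS_CPUTIME_ID)' on Linux
print(info.monotonic)       # True: process_time() can never go backwards
print(info.adjustable)      # False: unaffected by NTP or manual clock changes
print(info.resolution)      # e.g. 1e-09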
Example 6
Project: ouroboros   Author: pybee   File: test_time.py
def test_get_clock_info(self):
        clocks = ['clock', 'perf_counter', 'process_time', 'time']
        if hasattr(time, 'monotonic'):
            clocks.append('monotonic')

        for name in clocks:
            info = time.get_clock_info(name)
            #self.assertIsInstance(info, dict)
            self.assertIsInstance(info.implementation, str)
            self.assertNotEqual(info.implementation, '')
            self.assertIsInstance(info.monotonic, bool)
            self.assertIsInstance(info.resolution, float)
            # 0.0 < resolution <= 1.0
            self.assertGreater(info.resolution, 0.0)
            self.assertLessEqual(info.resolution, 1.0)
            self.assertIsInstance(info.adjustable, bool)

        self.assertRaises(ValueError, time.get_clock_info, 'xxx') 
Example 7
Project: kbe_server   Author: xiaohaoppy   File: test_time.py
def test_get_clock_info(self):
        clocks = ['clock', 'perf_counter', 'process_time', 'time']
        if hasattr(time, 'monotonic'):
            clocks.append('monotonic')

        for name in clocks:
            info = time.get_clock_info(name)
            #self.assertIsInstance(info, dict)
            self.assertIsInstance(info.implementation, str)
            self.assertNotEqual(info.implementation, '')
            self.assertIsInstance(info.monotonic, bool)
            self.assertIsInstance(info.resolution, float)
            # 0.0 < resolution <= 1.0
            self.assertGreater(info.resolution, 0.0)
            self.assertLessEqual(info.resolution, 1.0)
            self.assertIsInstance(info.adjustable, bool)

        self.assertRaises(ValueError, time.get_clock_info, 'xxx') 
Example 8
Project: gcn_metric_learning   Author: sk1712   File: models_siamese.py
def evaluate(self, data, labels, site, sess=None):
        """
        Runs one evaluation against the full epoch of data.
        Return the precision and the number of correct predictions.
        Batch evaluation saves memory and enables this to run on smaller GPUs.

        sess: the session in which the model has been trained.
        op: the Tensor that returns the number of correct predictions.
        data: size N x M
            N: number of signals (samples)
            M: number of vertices (features)
        labels: size N
            N: number of signals (samples)
        """
        t_process, t_wall = time.process_time(), time.time()
        scores, loss = self.predict(data, labels, site, sess)

        fpr, tpr, _ = roc_curve(labels, scores)
        roc_auc = auc(fpr, tpr)

        string = 'samples: {:d}, AUC : {:.2f}, loss: {:.4e}'.format(len(labels), roc_auc, loss)

        if sess is None:
            string += '\ntime: {:.0f}s (wall {:.0f}s)'.format(time.process_time() - t_process, time.time() - t_wall)
        return string, roc_auc, loss, scores 
Example 9
Project: DenoiseAverage   Author: Pella86   File: LogTimes.py
def __init__(self):
        self.starttime = time.process_time()
        self.nowtime = time.process_time()
        self.lastcall = time.process_time() 
Example 10
Project: DenoiseAverage   Author: Pella86   File: LogTimes.py
def __str__(self):
        self.nowtime = time.process_time()
        subtime =  self.nowtime - self.lastcall
        subtime = self.convert_in_ddhhss(subtime)
        s  = "Elapsed time for subprocess: {0}\n".format(subtime)
        
        totaltime = self.nowtime - self.starttime
        totaltime = self.convert_in_ddhhss(totaltime)
        s += "Time total elapsed: {0}".format(totaltime)
        
        self.lastcall = time.process_time()
        return s 
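Examples 9 and 10 show only two methods of the timer class; a minimal standalone version of the same idea might look like this (the class name is ours, and convert_in_ddhhss is reduced to a plain hh:mm:ss formatter):

import time

class SubprocessTimer:
    """Tracks CPU time since creation and since the last report."""

    def __init__(self):
        self.starttime = time.process_time()
        self.lastcall = self.starttime

    @staticmethod
    def _hhmmss(seconds):
        m, s = divmod(int(seconds), 60)
        h, m = divmod(m, 60)
        return "%02d:%02d:%02d" % (h, m, s)

    def __str__(self):
        now = time.process_time()
        s = "Elapsed time for subprocess: %s\n" % self._hhmmss(now - self.lastcall)
        s += "Time total elapsed: %s" % self._hhmmss(now - self.starttime)
        self.lastcall = now
        return s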
Example 11
Project: Software-Architecture-with-Python   Author: PacktPublishing   File: common_items.py
from contextlib import contextmanager

# timer_func is an alias for one of the time module's clocks, defined
# elsewhere in the source project (e.g. time.process_time).
@contextmanager
def timer():
    """ A simple timing function for routines """

    try:
        start = timer_func()
        yield
    except Exception as e:
        print(e)
        raise
    finally:
        end = timer_func()
        print ('Time spent=>',1000.0*(end - start),'ms.') 
Example 12
Project: django-web-profiler   Author: MicroPyramid   File: middleware.py
def process_request(self, request):
        self._start_time = time.time()
        self._start_rusage = resource.getrusage(resource.RUSAGE_SELF)
        self.t = time.process_time()
        response = super(DebugLoggingMiddleware, self).process_request(request)

        return response 
Example 13
Project: bpy_lambda   Author: bcongdon   File: fbx_utils.py
def level_down(self, message=""):
            if not self.ref_time:
                if message:
                    print(message)
                return
            ref_time = self.ref_time[self.level]
            print("\t" * self.level,
                  "\tDone (%f sec)\n" % ((time.process_time() - ref_time) if ref_time is not None else 0.0),
                  sep="")
            if message:
                print("\t" * self.level, message, sep="")
            del self.ref_time[self.level]
            self.level -= 1 
Example 14
Project: bpy_lambda   Author: bcongdon   File: fbx_utils.py
def step(self, message=""):
            ref_time = self.ref_time[self.level]
            curr_time = time.process_time()
            if ref_time is not None:
                print("\t" * self.level, "\tDone (%f sec)\n" % (curr_time - ref_time), sep="")
            self.ref_time[self.level] = curr_time
            print("\t" * self.level, message, sep="") 
Example 15
Project: challenge-201608-refactor   Author: cohpy   File: JPTimer.py
def __enter__(self):
        """
        Start timing something.

        Note - the with statement will invoke this automatically.
        :return: an instance of this class
        """
        self.start = time.process_time()
        return self 
Example 16
Project: challenge-201608-refactor   Author: cohpy   File: JPTimer.py
def __exit__(self, *args):
        """
        Stop timing something and calculate the difference.

        Note - the with statement will invoke this automatically.
        :param args:
        :return:
        """
        self.end = time.process_time()
        self.interval = self.end - self.start 
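Together, Examples 15 and 16 give the usual with-statement pattern; a hypothetical usage sketch:

with JPTimer() as timer:
    sum(i * i for i in range(10**6))   # any CPU-bound work
print("CPU seconds: %.4f" % timer.interval)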
Example 17
Project: SimPype   Author: Mallets   File: simulation.py
def run(self, *args, **kwargs):
		""" Run the simulation environment using SimPy environment. """
		self.log.init()
		sptime = time.process_time()
		self.env.run(*args, **kwargs)
		eptime = time.process_time()
		# Save some simulation parameters
		self.log.write("Simulation Seed: "+ str(self.seed))
		self.log.write("Simulation Time: " + "%.9f" % self.env.now)
		self.log.write("Execution Time: " + "%.9f" % (eptime - sptime)) 
Example 18
Project: discontinuous-hmc   Author: aki-nishimura   File: dhmc_sampler.py
def run_sampler(self, theta0, dt_range, nstep_range, n_burnin, n_sample, seed=None, n_update=10):
        """Run DHMC and return samples and some additional info."""

        np.random.seed(seed)

        # Run HMC.
        theta = theta0
        n_per_update = math.ceil((n_burnin + n_sample) / n_update)
        pathlen_ave = 0
        samples = np.zeros((n_sample + n_burnin, len(theta)))
        logp_samples = np.zeros(n_sample + n_burnin)
        accept_prob = np.zeros(n_sample + n_burnin)

        tic = time.process_time()  # Start clock
        logp, grad, aux = self.f(theta)
        for i in range(n_sample + n_burnin):
            dt = np.random.uniform(dt_range[0], dt_range[1])
            nstep = np.random.randint(nstep_range[0], nstep_range[1] + 1)
            theta, logp, grad, aux, accept_prob[i], pathlen \
                = self.hmc(dt, nstep, theta, logp, grad, aux)
            pathlen_ave = i / (i + 1) * pathlen_ave + 1 / (i + 1) * pathlen
            samples[i, :] = theta
            logp_samples[i] = logp
            if (i + 1) % n_per_update == 0:
                print('{:d} iterations have been completed.'.format(i + 1))

        toc = time.process_time()
        time_elapsed = toc - tic
        print(('The average path length of each DHMC iteration was '
               '{:.2f}.'.format(pathlen_ave)))

        return samples, logp_samples, accept_prob, pathlen_ave, time_elapsed 
Example 19
Project: chat   Author: Decalogue   File: mytools.py
def time_me(info="used", format_string="ms"):
    """Performance analysis - time

    Decorator of time performance analysis.
    ????——????
    ????(wall clock time, elapsed time)???????????????????????
    ???????????CPU???????????????C++/Windows?????<time.h>???
    ??????????????????
    1.time.clock()??????????????CPU????????????????time.time()????
    time.clock()?????????????UNIX?????????"????"?????????????????
    ??WINDOWS????????????????????????????????????
    ???????????????????WIN32?QueryPerformanceCounter()???????????????
    2.time.perf_counter()?????????????????????????????
    ???????????????????????
    3.time.process_time()???????

    Args:
        info: Customize print info. ????????
        format_string: Specifies the timing unit. ?????????'s': ??'ms': ???
            Defaults to 's'.
    """
    def _time_me(func):
        @wraps(func)
        def _wrapper(*args, **kwargs):
            start = time.clock()
            # start = time.perf_counter()
            # start = time.process_time()
            result = func(*args, **kwargs)
            end = time.clock()
            if format_string == "s":
                print("%s %s %s"%(func.__name__, info, end - start), "s")
            elif format_string == "ms":
                print("%s %s %s" % (func.__name__, info, 1000*(end - start)), "ms")
            return result
        return _wrapper
    return _time_me 
Example 20
Project: skan   Author: jni   File: bench_skan.py
from contextlib import contextmanager
from time import process_time

@contextmanager
def timer():
    time = []
    t0 = process_time()
    yield time
    t1 = process_time()
    time.append(t1 - t0) 
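This generator is used as a context manager (hence the @contextlib.contextmanager decorator added above); the yielded list receives the CPU-time delta once the block exits. A hypothetical usage sketch:

with timer() as t:
    sum(i * i for i in range(10**6))   # stand-in workload
print("CPU seconds:", t[0])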
Example 21
Project: fully-convolutional-network-semantic-segmentation   Author: alecng94   File: fcn_predict.py
def predict(testImgPath, imgDir, clipSize, net):

	# get file name
	pathArr = testImgPath.split('/')
	tmpFileName = pathArr[len(pathArr) - 1]
	filename = os.path.splitext(tmpFileName)[0]

	# preprocess image
	processedImg = preprocessImg(testImgPath, clipSize)

	# reshape image to be put into data layer
	# shape for input (data blob is N x C x H x W)
	net.blobs['data'].reshape(1, *processedImg.shape)
	print('Predicting...')
	net.blobs['data'].data[...] = processedImg

	# run net and take argmax for prediction
	t = time.process_time()
	net.forward()
	elapsed_time = time.process_time() - t
	out = net.blobs['score'].data[0].argmax(axis=0)
	print("Prediction time: %.3f" % elapsed_time)

	print('Saving...')
	savePrediction(imgDir, out, filename, testImgPath)
	print('Done processing image ' + filename)

	return elapsed_time


# @SUMMARY 	: saves output of neural network into four different formats describe above
# @PARAM	: (imgDir) image target directory
# @PARAM	: (out) output of neural network
# @PARAM	: (filename) to save as
# @PARAM	: (testImgPath) for segmentation 
Example 22
Project: Fusion360AddinSkeleton   Author: tapnair   File: Fusion360DebugUtilities.py
def perf_log(log, function_reference, command, identifier=''):
    log.append((function_reference, command, identifier, time.process_time())) 
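perf_log only appends (function, command, identifier, timestamp) tuples; turning them into durations is left to the caller. A hypothetical companion that prints the CPU-time delta between consecutive entries:

def perf_report(log):
    # Pair each entry with the next one and report the gap between them.
    for (func_ref, command, identifier, t1), (_, _, _, t2) in zip(log, log[1:]):
        print("%s %s %s: %.6f s" % (func_ref, command, identifier, t2 - t1))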
Example 23
Project: web_ctp   Author: molebot   File: test_time.py
def test_process_time(self):
        # process_time() should not include time spent during a sleep
        start = time.process_time()
        time.sleep(0.100)
        stop = time.process_time()
        # use 20 ms because process_time() has usually a resolution of 15 ms
        # on Windows
        self.assertLess(stop - start, 0.020)

        info = time.get_clock_info('process_time')
        self.assertTrue(info.monotonic)
        self.assertFalse(info.adjustable) 
Example 24
Project: feeluown-core   Author: cosven   File: decorators.py
def log_exectime(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        t = time.process_time()
        result = func(*args, **kwargs)
        elapsed_time = time.process_time() - t
        logger.info('function %s executed time: %f ms'
                    % (func.__name__, elapsed_time * 1000))
        return result
    return wrapper 
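A hypothetical usage sketch, assuming a configured logger:

@log_exectime
def busy_work(n):
    return sum(i * i for i in range(n))

busy_work(10**6)   # logs something like: function busy_work executed time: ... ms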
Example 25
Project: blender-addons   Author: scorpion81   File: fbx_utils.py
def level_down(self, message=""):
            if not self.ref_time:
                if message:
                    print(message)
                return
            ref_time = self.ref_time[self.level]
            print("\t" * self.level,
                  "\tDone (%f sec)\n" % ((time.process_time() - ref_time) if ref_time is not None else 0.0),
                  sep="")
            if message:
                print("\t" * self.level, message, sep="")
            del self.ref_time[self.level]
            self.level -= 1 
Example 26
Project: blender-addons   Author: scorpion81   File: fbx_utils.py
def step(self, message=""):
            ref_time = self.ref_time[self.level]
            curr_time = time.process_time()
            if ref_time is not None:
                print("\t" * self.level, "\tDone (%f sec)\n" % (curr_time - ref_time), sep="")
            self.ref_time[self.level] = curr_time
            print("\t" * self.level, message, sep="") 
Example 27
Project: neuralmonkey   Author: ufal   File: tf_manager.py
def execute(self,
                dataset: Dataset,
                execution_scripts,
                train=False,
                compute_losses=True,
                summaries=True,
                batch_size=None,
                log_progress: int = 0) -> List[ExecutionResult]:
        if batch_size is None:
            batch_size = len(dataset)
        batched_dataset = dataset.batch_dataset(batch_size)
        last_log_time = time.process_time()

        batch_results = [
            [] for _ in execution_scripts]  # type: List[List[ExecutionResult]]
        for batch_id, batch in enumerate(batched_dataset):
            if (time.process_time() - last_log_time > log_progress
                    and log_progress > 0):
                log("Processed {} examples.".format(batch_id * batch_size))
                last_log_time = time.process_time()
            executables = [s.get_executable(compute_losses=compute_losses,
                                            summaries=summaries,
                                            num_sessions=len(self.sessions))
                           for s in execution_scripts]

            while not all(ex.result is not None for ex in executables):
                self._run_executables(batch, executables, train)

            for script_list, executable in zip(batch_results, executables):
                script_list.append(executable.result)

        collected_results = []  # type: List[ExecutionResult]
        for result_list in batch_results:
            collected_results.append(reduce_execution_results(result_list))

        return collected_results 
Example 28
Project: neuralmonkey   Author: ufal   File: learning_utils.py
def _is_logging_time(step: int, logging_period_batch: int,
                     last_log_time: float, logging_period_time: int):
    if logging_period_batch is not None:
        return step % logging_period_batch == logging_period_batch - 1
    return last_log_time + logging_period_time < time.process_time() 
Example 29
Project: neuralmonkey   Author: ufal   File: tf_manager.py
def execute(self,
                dataset: Dataset,
                execution_scripts,
                train=False,
                compute_losses=True,
                summaries=True,
                batch_size=None,
                log_progress: int = 0) -> List[ExecutionResult]:
        if batch_size is None:
            batch_size = len(dataset)
        batched_dataset = dataset.batch_dataset(batch_size)
        last_log_time = time.process_time()

        batch_results = [
            [] for _ in execution_scripts]  # type: List[List[ExecutionResult]]
        for batch_id, batch in enumerate(batched_dataset):
            if (time.process_time() - last_log_time > log_progress
                    and log_progress > 0):
                log("Processed {} examples.".format(batch_id * batch_size))
                last_log_time = time.process_time()
            executables = [s.get_executable(compute_losses=compute_losses,
                                            summaries=summaries,
                                            num_sessions=len(self.sessions))
                           for s in execution_scripts]

            while not all(ex.result is not None for ex in executables):
                self._run_executables(batch, executables, train)

            for script_list, executable in zip(batch_results, executables):
                script_list.append(executable.result)

        collected_results = []  # type: List[ExecutionResult]
        for result_list in batch_results:
            collected_results.append(reduce_execution_results(result_list))

        return collected_results 
Example 30
Project: neuralmonkey   Author: ufal   File: learning_utils.py
def _is_logging_time(step: int, logging_period_batch: int,
                     last_log_time: float, logging_period_time: int):
    if logging_period_batch is not None:
        return step % logging_period_batch == logging_period_batch - 1
    return last_log_time + logging_period_time < time.process_time() 
Example 31
Project: neuralmonkey   Author: ufal   File: tf_manager.py
def execute(self,
                dataset: Dataset,
                execution_scripts,
                train=False,
                compute_losses=True,
                summaries=True,
                batch_size=None,
                log_progress: int = 0) -> List[ExecutionResult]:
        if batch_size is None:
            batch_size = len(dataset)
        batched_dataset = dataset.batch_dataset(batch_size)
        last_log_time = time.process_time()

        batch_results = [
            [] for _ in execution_scripts]  # type: List[List[ExecutionResult]]
        for batch_id, batch in enumerate(batched_dataset):
            if (time.process_time() - last_log_time > log_progress
                    and log_progress > 0):
                log("Processed {} examples.".format(batch_id * batch_size))
                last_log_time = time.process_time()
            executables = [s.get_executable(compute_losses=compute_losses,
                                            summaries=summaries,
                                            num_sessions=len(self.sessions))
                           for s in execution_scripts]

            while not all(ex.result is not None for ex in executables):
                self._run_executables(batch, executables, train)

            for script_list, executable in zip(batch_results, executables):
                script_list.append(executable.result)

        collected_results = []  # type: List[ExecutionResult]
        for result_list in batch_results:
            collected_results.append(reduce_execution_results(result_list))

        return collected_results 
Example 32
Project: cython-workshop   Author: hroncok   File: __init__.py
def solve(impl='python'):
    if impl == 'cython':
        solvercls = csolver.CBruteSolver
    else:
        solvercls = solver.BruteSolver
    try:
        os.mkdir('data/' + impl)
    except FileExistsError:
        pass
    for filename in sorted(glob.glob('data/*.inst.dat')):
        print(filename)
        loaded_data = list(dataloader.load_input(filename))
        count = loaded_data[0]['count']
        correct = list(dataloader.load_provided_results(
            'data/knap_{0:02d}.sol.dat'.format(count)))
        outname = filename.replace('.inst.dat', '.results.jsons')
        outname = outname.replace('data/', 'data/' + impl + '/')
        with open(outname, 'w') as f:
            filestartime = time.process_time()
            for idx, backpack in enumerate(loaded_data):
                startime = time.process_time()
                s = solvercls(backpack)
                backpack['maxcombo'], backpack['maxcost'] = s.solve()
                endtime = time.process_time()
                delta = endtime - startime
                backpack['time'] = delta
                assert backpack['maxcost'] == correct[idx]['maxcost']
                del backpack['items']
                f.write(json.dumps(backpack) + '\n')
            fileendtime = time.process_time()
            delta = fileendtime - filestartime
            f.write('{}\n'.format(delta)) 
Example 33
Project: CryExport   Author: britalmeida   File: __init__.py
def save_file(operator, context, filepath="", use_selection=False, **kwargs):

    print('CryEngine export starting... %r' % filepath)
    start_time = time.process_time()
    try:
        file = open(filepath, "w", encoding="utf8", newline="\n")
    except:
        import traceback
        traceback.print_exc()
        operator.report({'ERROR'}, "Couldn't open file %r" % filepath)
        return {'CANCELLED'}

    fw = file.write

    fw('hello')

    file.close()

    # copy all collected files.
    #bpy_extras.io_utils.path_reference_copy(copy_set)

    print('export finished in %.4f sec.' % (time.process_time() - start_time))
    return {'FINISHED'}


# UI ########################################################################## 
Example 34
Project: ouroboros   Author: pybee   File: profile.py
def __init__(self, timer=None, bias=None):
        self.timings = {}
        self.cur = None
        self.cmd = ""
        self.c_func_name = ""

        if bias is None:
            bias = self.bias
        self.bias = bias     # Materialize in local dict for lookup speed.

        if not timer:
            self.timer = self.get_time = time.process_time
            self.dispatcher = self.trace_dispatch_i
        else:
            self.timer = timer
            t = self.timer() # test out timer function
            try:
                length = len(t)
            except TypeError:
                self.get_time = timer
                self.dispatcher = self.trace_dispatch_i
            else:
                if length == 2:
                    self.dispatcher = self.trace_dispatch
                else:
                    self.dispatcher = self.trace_dispatch_l
                # This get_time() implementation needs to be defined
                # here to capture the passed-in timer in the parameter
                # list (for performance).  Note that we can't assume
                # the timer() result contains two values in all
                # cases.
                def get_time_timer(timer=timer, sum=sum):
                    return sum(timer())
                self.get_time = get_time_timer
        self.t = self.get_time()
        self.simulate_call('profiler')

    # Heavily optimized dispatch routine for os.times() timer 
Example 35
Project: ouroboros   Author: pybee   File: test_time.py
def test_process_time(self):
        # process_time() should not include time spent during a sleep
        start = time.process_time()
        time.sleep(0.100)
        stop = time.process_time()
        # use 20 ms because process_time() has usually a resolution of 15 ms
        # on Windows
        self.assertLess(stop - start, 0.020)

        info = time.get_clock_info('process_time')
        self.assertTrue(info.monotonic)
        self.assertFalse(info.adjustable) 
Example 36
Project: kbe_server   Author: xiaohaoppy   File: profile.py
def __init__(self, timer=None, bias=None):
        self.timings = {}
        self.cur = None
        self.cmd = ""
        self.c_func_name = ""

        if bias is None:
            bias = self.bias
        self.bias = bias     # Materialize in local dict for lookup speed.

        if not timer:
            self.timer = self.get_time = time.process_time
            self.dispatcher = self.trace_dispatch_i
        else:
            self.timer = timer
            t = self.timer() # test out timer function
            try:
                length = len(t)
            except TypeError:
                self.get_time = timer
                self.dispatcher = self.trace_dispatch_i
            else:
                if length == 2:
                    self.dispatcher = self.trace_dispatch
                else:
                    self.dispatcher = self.trace_dispatch_l
                # This get_time() implementation needs to be defined
                # here to capture the passed-in timer in the parameter
                # list (for performance).  Note that we can't assume
                # the timer() result contains two values in all
                # cases.
                def get_time_timer(timer=timer, sum=sum):
                    return sum(timer())
                self.get_time = get_time_timer
        self.t = self.get_time()
        self.simulate_call('profiler')

    # Heavily optimized dispatch routine for os.times() timer 
Example 37
Project: kbe_server   Author: xiaohaoppy   File: test_time.py
def test_process_time(self):
        # process_time() should not include time spent during a sleep
        start = time.process_time()
        time.sleep(0.100)
        stop = time.process_time()
        # use 20 ms because process_time() has usually a resolution of 15 ms
        # on Windows
        self.assertLess(stop - start, 0.020)

        info = time.get_clock_info('process_time')
        self.assertTrue(info.monotonic)
        self.assertFalse(info.adjustable) 
Example 38
Project: py-prng   Author: czechnology   File: basic_tests.py
def run_all(generator, n_bits, sig_level, continuous=False, print_log=False):
    # if we want all the tests to be applied to the *same* bit sequence,
    # we need to pre-compute it and create a static generator
    if not continuous:
        ts = time()
        sequence = generator.random_bytes((n_bits // 8) + 16)
        print(sequence)
        generator = StaticSequenceGenerator(seq=sequence)
        if print_log:
            print("(Sequence pre-computed in", nicer_time(time() - ts) + ')', flush=True)

    if not continuous:
        generator.rewind()  # rewind
    tf = frequency_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    ts = serial_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    tp = poker_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    tr = runs_test(generator, n_bits, sig_level=sig_level)

    if not continuous:
        generator.rewind()  # rewind
    tac = autocorrelation_test(generator, n_bits, d=100, sig_level=sig_level)

    return tf, ts, tp, tr, tac 
Example 39
Project: FusionVenter   Author: tapnair   File: Fusion360DebugUtilities.py
def perf_log(log, function_reference, command, identifier=''):
    log.append((function_reference, command, identifier, time.process_time())) 
Example 40
Project: swarmops   Author: Hvass-Labs   File: Timer.py
def __init__(self):
        """
        Start the timer.

        :return: Object instance.
        """

        # Note that time.process_time() doesn't work with multiprocessing.

        self.start_time = time.time()
        self.end_time = self.start_time 
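The comment above is worth illustrating: each OS process has its own CPU clock, so CPU time spent in worker processes never shows up in the parent's process_time(). A minimal sketch:

import multiprocessing as mp
import time

def child():
    sum(i * i for i in range(10**7))              # CPU work inside the child
    print("child CPU time: %.3f s" % time.process_time())

if __name__ == "__main__":
    t0 = time.process_time()
    p = mp.Process(target=child)
    p.start()
    p.join()
    # The child's CPU work is invisible here, which is why the Timer class
    # above falls back to wall-clock time.time().
    print("parent CPU time: %.3f s" % (time.process_time() - t0))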
Example 41
Project: cps2-gfx-editor   Author: goosechooser   File: concurrency_sample.py
def pool_sprites(filepath):
    log = logging.getLogger('pool_sprites')
    #log.setLevel(logging.INFO)

    sprites = helper.fromlua(filepath)
    filename = filepath.split("/")[2]

    log.info("starting %s", filepath)
    time_point1 = time.process_time()

    for i, sprite in enumerate(sprites):
        tiles2d = tile_printer.make_tiles_mmap(GFX_MM, sprite.addrs2d())
        sprites[i].tiles = helper.flatten_list(tiles2d)

    time_point2 = time.process_time()
    delta_t = time_point2 - time_point1
    #log.info("making sprites took %s to complete", delta_t)

    time_point3 = time.process_time()
    put_sprites(sprites, OUTPUT_FOLDER + filename[:-4])
    time_point4 = time.process_time()

    delta_t2 = time_point4 - time_point3
    #log.info("putting sprites took %s to complete", delta_t2)
    log.info("ending %s", filepath)
    return delta_t, delta_t2 
Example 42
Project: FusionCSVtoOutput   Author: tapnair   File: Fusion360DebugUtilities.py
def perf_log(log, function_reference, command, identifier=''):
    log.append((function_reference, command, identifier, time.process_time())) 
Example 43
Project: zmirror   Author: aploium   File: zmirror.py    (MIT License)
def generate_our_response():
    """
    ???????
    :rtype: Response
    """
    # copy and parse remote response
    resp = copy_response(is_streamed=parse.streamed_our_response)

    if parse.time["req_time_header"] >= 0.00001:
        parse.set_extra_resp_header('X-Header-Req-Time', "%.4f" % parse.time["req_time_header"])
    if parse.time.get("start_time") is not None and not parse.streamed_our_response:
        # remote request time should be excluded when calculating total time
        parse.set_extra_resp_header('X-Body-Req-Time', "%.4f" % parse.time["req_time_body"])
        parse.set_extra_resp_header('X-Compute-Time',
                                    "%.4f" % (process_time() - parse.time["start_time"]))

    parse.set_extra_resp_header('X-Powered-By', 'zmirror/%s' % CONSTS.__VERSION__)

    if developer_dump_all_traffics and not parse.streamed_our_response:
        dump_zmirror_snapshot("traffic")

    return resp 
Example 44
Project: bpy_lambda   Author: bcongdon   File: stl_utils.py
def read_stl(filepath):
    """
    Return the triangles and points of an stl binary file.

    Please note that this process can take a lot of time if the file is
    huge (~1m30 for a 1 GB stl file on a quad core i7).

    - returns a tuple(triangles, triangles' normals, points).

      triangles
          A list of triangles, each triangle as a tuple of 3 index of
          point in *points*.

      triangles' normals
          A list of vectors3 (tuples, xyz).

      points
          An indexed list of points, each point is a tuple of 3 float
          (xyz).

    Example of use:

       >>> tris, tri_nors, pts = read_stl(filepath)
       >>> pts = list(pts)
       >>>
       >>> # print the coordinate of the triangle n
       >>> print(pts[i] for i in tris[n])
    """
    import time
    start_time = time.process_time()

    tris, tri_nors, pts = [], [], ListDict()

    with open(filepath, 'rb') as data:
        # check for ascii or binary
        gen = _ascii_read if _is_ascii_file(data) else _binary_read

        for nor, pt in gen(data):
            # Add the triangle and the point.
            # If the point is already in the list of points, the
            # index returned by pts.add() will be the one from the
            # first equal point inserted.
            tris.append([pts.add(p) for p in pt])
            tri_nors.append(nor)

    print('Import finished in %.4f sec.' % (time.process_time() - start_time))

    return tris, tri_nors, pts.list 
Example 45
Project: fg21sim   Author: liweitianux   File: console.py
def _task_default(self, **kwargs):
        """
        The default task that this console manages, which performs
        the foregrounds simulations.

        Returns
        -------
        success : bool
            Whether the task successfully finished?
        error : str
            Error message if the task failed

        NOTE
        ----
        The task is synchronous and may be computationally intensive
        (i.e., CPU-bound rather than IO/event-bound), therefore,
        threads (or processes) are required to make it non-blocking
        (i.e., asynchronous).

        References:
        [1] https://stackoverflow.com/a/32164711/4856091
        """
        t1_start = time.perf_counter()
        t2_start = time.process_time()
        logger.info("Console DEFAULT task: START ...")
        logger.info("Preparing to start foregrounds simulations ...")
        logger.info("Checking the configurations ...")
        self.configs.check_all()
        #
        logger.info("Importing modules + Numba JIT, waiting ...")
        from ...foregrounds import Foregrounds
        #
        fg = Foregrounds(self.configs)
        fg.preprocess()
        fg.simulate()
        fg.postprocess()
        logger.info("Foregrounds simulations DONE!")
        logger.info("Console DEFAULT task: DONE!")
        t1_stop = time.perf_counter()
        t2_stop = time.process_time()
        logger.info("Elapsed time: {0:.3f} (s)".format(t1_stop - t1_start))
        logger.info("CPU process time: {0:.3f} (s)".format(t2_stop - t2_start))
        # NOTE: always return a tuple of (success, error)
        return (True, None) 
Example 46
Project: oasis   Author: ngmarchant   File: experiments.py
def repeat_expt(smplr, n_expts, n_labels, output_file = None):
    """
    Parameters
    ----------
    smplr : sub-class of PassiveSampler
        sampler must have a sample_distinct method, reset method and ...

    n_expts : int
        number of expts to run

    n_labels : int
        number of labels to query from the oracle in each expt
    """

    FILTERS = tables.Filters(complib='zlib', complevel=5)

    max_iter = smplr._max_iter
    n_class = smplr._n_class
    if max_iter < n_labels:
        raise ValueError("Cannot query {} labels. Sampler ".format(n_labels) +
                         "instance supports only {} iterations".format(max_iter))

    if output_file is None:
        # Use current date/time as filename
        output_file = 'expt_' + time.strftime("%d-%m-%Y_%H:%M:%S") + '.h5'
    logging.info("Writing output to {}".format(output_file))

    f = tables.open_file(output_file, mode='w', filters=FILTERS)
    float_atom = tables.Float64Atom()
    bool_atom = tables.BoolAtom()
    int_atom = tables.Int64Atom()

    array_F = f.create_carray(f.root, 'F_measure', float_atom, (n_expts, n_labels, n_class))
    array_s = f.create_carray(f.root, 'n_iterations', int_atom, (n_expts, 1))
    array_t = f.create_carray(f.root, 'CPU_time', float_atom, (n_expts, 1))

    logging.info("Starting {} experiments".format(n_expts))
    for i in range(n_expts):
        if i%np.ceil(n_expts/10).astype(int) == 0:
            logging.info("Completed {} of {} experiments".format(i, n_expts))
        ti = time.process_time()
        smplr.reset()
        smplr.sample_distinct(n_labels)
        tf = time.process_time()
        if hasattr(smplr, 'queried_oracle_'):
            array_F[i,:,:] = smplr.estimate_[smplr.queried_oracle_]
        else:
            array_F[i,:,:] = smplr.estimate_
        array_s[i] = smplr.t_
        array_t[i] = tf - ti
    f.close()

    logging.info("Completed all experiments") 
Example 47
Project: MDP_GridWorld   Author: abdalmoniem   File: Policy.py
def valueIteration(self, debugCallback = None, turbo = False):
		'''using the value iteration algorithm (see AI: A Modern Approach (Third ed.) pag. 652)
		   calculate the utilities for all states in the grid world
		   
		   the debugCallback must be a function that takes two parameters:
				policy: the policy, which the function can use to display intermediate results
				isEnded: a flag the function can use to know whether the valueIteration has ended
			the debugCallback must return True to continue, and can stop the algorithm by returning False

			the algorithm has a maximum number of iterations, so that even an
			example with a discount factor = 1 terminates.

			in turbo mode the utility vector is updated in place, so values already
			computed during the i-th sweep are reused within that same sweep; the
			classic approach computes the i-th vector only from the (i-1)-th one.
			In our tests the turbo mode gave an improvement of about 30%
		   
		   returns the number of iterations it needs for converge
		'''
		eps = Policy.valueIterationEpsilon
		dfact = self.world.discFactor
		c, r = self.world.size
		if turbo: newUv = self.utilities
		
		reiterate = True
		start = time.process_time()
		while(reiterate):
			self.numOfIterations += 1
			maxNorm = 0 #see the max norm definition in AI: A Modern Approach (Third ed.) pag. 654
			
			if not turbo: newUv = self.__createEmptyUtilityVector()
			
			for x in range(c):
				for y in range(r):
					v = self.__cellUtility(x, y) #calculate using the self.utilities (i.e. the previous step)
					if not v is None: maxNorm = max(maxNorm, abs(self.utilities[y][x] - v))
					newUv[y][x] = v #update the new utility vector that we are creating
					
			if not turbo: self.utilities = newUv
			
			if debugCallback: reiterate = debugCallback(self, False)
			
			if maxNorm <= eps * (1 - dfact)/dfact: reiterate = False

			end = time.process_time()
			self.elapsed = end - start
			if self.numOfIterations >= Policy.maxNumberOfIterations or self.elapsed > Policy.timeToLive:
				reiterate = False
				print("warning: max number of iterations exceeded")
				messagebox.showwarning("Warning", "max number of iterations exceeded")
		
		if debugCallback: reiterate = debugCallback(self, True)
					
		return self.numOfIterations 
Example 48
Project: blender-addons   Author: scorpion81   File: stl_utils.py
def read_stl(filepath):
    """
    Return the triangles and points of an stl binary file.

    Please note that this process can take a lot of time if the file is
    huge (~1m30 for a 1 GB stl file on a quad core i7).

    - returns a tuple(triangles, triangles' normals, points).

      triangles
          A list of triangles, each triangle as a tuple of 3 index of
          point in *points*.

      triangles' normals
          A list of vectors3 (tuples, xyz).

      points
          An indexed list of points, each point is a tuple of 3 float
          (xyz).

    Example of use:

       >>> tris, tri_nors, pts = read_stl(filepath)
       >>> pts = list(pts)
       >>>
       >>> # print the coordinate of the triangle n
       >>> print(pts[i] for i in tris[n])
    """
    import time
    start_time = time.process_time()

    tris, tri_nors, pts = [], [], ListDict()

    with open(filepath, 'rb') as data:
        # check for ascii or binary
        gen = _ascii_read if _is_ascii_file(data) else _binary_read

        for nor, pt in gen(data):
            # Add the triangle and the point.
            # If the point is already in the list of points, the
            # index returned by pts.add() will be the one from the
            # first equal point inserted.
            tris.append([pts.add(p) for p in pt])
            tri_nors.append(nor)

    print('Import finished in %.4f sec.' % (time.process_time() - start_time))

    return tris, tri_nors, pts.list 
Example 49
Project: ablator   Author: ablator   File: __init__.py
def which(client_user: ClientUser, functionality: Functionality) -> Optional[Availability]:
    """
    Which Flavor of the given Functionality is enabled for the user, if any?

    Returns a Flavor object that corresponds to the ClientUser's enabled functionality,
    or `None` if the user does not have any Flavor in the given Functionality.

    Use ClientUser.user_from_object to get or create a ClientUser instance from any hashable
    object (usually a string).
    """
    context = WhichContext()
    context.client_user = client_user
    context.functionality = functionality

    pipeline = [
        # roll out strategies
        check_roll_out_recall,
        check_roll_out_enable_globally,

        # retrieve availability
        get_availability,

        # check availability and switch on based on max user count
        check_for_existing_enabled_availability,
        assert_roll_out_is_not_paused,
        assert_existence_of_release,
        assert_existence_of_flavors,
        get_enabled_count,
        create_new_availability_with_random_flavor,
        enable_availability_by_user_count,
    ]

    # Go through each function in the pipeline. If it yields an Availability, we're done
    # and can return it. Otherwise, continue until we hit the end, or catch a NoAvailability
    # exception.
    # Splitting the methods up like this helps with testing, caching, and gaining an overview over
    # what actually happens through logging. Hopefully.
    start_time = time.process_time()
    for func in pipeline:
        try:
            av = func(context)
            if av:
                save_request_log_entry(
                    str(context.functionality.id),
                    str(av.flavor_id),
                    func.__name__,
                    client_user.id,
                    time.process_time() - start_time
                )
                return av
        except NoAvailability:
            save_request_log_entry(
                str(context.functionality.id),
                None,
                func.__name__,
                client_user.id,
                time.process_time() - start_time
            )
            return None
    return None 
Example 50
Project: gcn_metric_learning   Author: sk1712   File: models_siamese.py
def fit(self, train_data, train_labels, val_data, val_labels):
        t_process, t_wall = time.process_time(), time.time()
        sess = tf.Session(graph=self.graph)
        shutil.rmtree(self._get_path('summaries'), ignore_errors=True)
        writer = tf.summary.FileWriter(self._get_path('summaries'), self.graph)
        shutil.rmtree(self._get_path('checkpoints'), ignore_errors=True)
        os.makedirs(self._get_path('checkpoints'))
        path = os.path.join(self._get_path('checkpoints'), 'model')
        sess.run(self.op_init)

        # Training.
        accuracies = []
        losses = []
        indices = collections.deque()
        num_steps = int(self.num_epochs * train_data.shape[0] / self.batch_size)
        for step in range(1, num_steps+1):

            # Be sure to have used all the samples before using one a second time.
            if len(indices) < self.batch_size:
                indices.extend(np.random.permutation(train_data.shape[0]))
            idx = [indices.popleft() for i in range(self.batch_size)]

            batch_data, batch_labels = train_data[idx, :, :, :], train_labels[idx]
            if type(batch_data) is not np.ndarray:
                batch_data = batch_data.toarray()  # convert sparse matrices
            feed_dict = {self.ph_data: batch_data, self.ph_labels: batch_labels, self.ph_dropout: self.dropout}
            learning_rate, loss_average = sess.run([self.op_train, self.op_loss_average], feed_dict)

            # Periodical evaluation of the model.
            if step % self.eval_frequency == 0 or step == num_steps:
                epoch = step * self.batch_size / train_data.shape[0]
                print('step {} / {} (epoch {:.2f} / {}):'.format(step, num_steps, epoch, self.num_epochs))
                print('  learning_rate = {:.2e}, loss_average = {:.2e}'.format(learning_rate, loss_average))

                string, auc, loss, scores_summary = self.evaluate(train_data, train_labels, sess)
                print('  training {}'.format(string))

                string, auc, loss, scores_summary = self.evaluate(val_data, val_labels, sess)
                print('  validation {}'.format(string))
                print('  time: {:.0f}s (wall {:.0f}s)'.format(time.process_time()-t_process, time.time()-t_wall))

                accuracies.append(auc)
                losses.append(loss)

                # Summaries for TensorBoard.
                summary = tf.Summary()
                summary.ParseFromString(sess.run(self.op_summary, feed_dict))
                summary.value.add(tag='validation/auc', simple_value=auc)
                summary.value.add(tag='validation/loss', simple_value=loss)
                writer.add_summary(summary, step)
                
                # Save model parameters (for evaluation).
                self.op_saver.save(sess, path, global_step=step)

        print('validation accuracy: peak = {:.2f}, mean = {:.2f}'.format(max(accuracies), np.mean(accuracies[-10:])))
        writer.close()
        sess.close()
        
        t_step = (time.time() - t_wall) / num_steps
        return accuracies, losses, t_step, scores_summary