Python sys.setprofile() Examples

The following code examples show how to use sys.setprofile(). They are taken from open source Python projects.

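Before the project examples, here is a minimal, illustrative sketch of the pattern they all share; it is not taken from any of the projects below, and the names profile_hook and add are ours. A hook registered with sys.setprofile() is called with a frame, an event name ('call', 'return', 'c_call', 'c_return' or 'c_exception') and an event-dependent argument, and it is removed again by passing None:

import sys

def profile_hook(frame, event, arg):
    # Unlike a trace function, a profile hook fires per call/return,
    # not per executed line, so its overhead is comparatively low.
    if event in ("call", "return"):
        code = frame.f_code
        print("{}: {} ({}:{})".format(event, code.co_name,
                                      code.co_filename, frame.f_lineno))

def add(a, b):
    return a + b

sys.setprofile(profile_hook)   # install the hook for the current thread
try:
    add(1, 2)
finally:
    sys.setprofile(None)       # always uninstall the hook when done

Each example below follows the same pattern: install a callable with sys.setprofile(fn), let it observe the events, and reset with sys.setprofile(None).
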
Example 1
Project: where   Author: kartverket   File: util.py    MIT License
def trace_full(frame, event, arg):
    """Show full trace for a given stack frame

    This function is mainly meant for debugging and can for instance be activated by inserting either
    `sys.settrace(util.trace_full)` or `sys.setprofile(util.trace_full)` in the source code where tracing should start.

    Args:
        frame:    Current frame object.
        event:    Event, not used but needed to be compatible with sys.setprofile.
        arg:      Arg, not used but needed to be compatible with sys.setprofile.
    """
    callers = list()
    while frame:
        func_name = frame.f_code.co_name
        line_no = frame.f_lineno
        file_name = frame.f_code.co_filename
        if file_name.endswith(".py"):
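            # "/erehw/" is "/where/" reversed: the slice keeps the path after
            # the last "/where/" and drops the ".py" suffix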
            module_name = file_name[-file_name[::-1].find("/erehw/") : -3].replace("/", ".")
            callers.insert(0, "{}.{} ({})".format(module_name, func_name, line_no))

        frame = frame.f_back

    print("\n    -> ".join(callers)) 
Example 2
Project: codimension   Author: SergeySatskiy   File: base_cdm_dbg.py    GNU General Public License v3.0
def set_trace(self, frame=None):
        """Starts debugging from 'frame'"""
        if frame is None:
            frame = sys._getframe().f_back  # Skip set_trace method

        if sys.version_info[0] == 2:
            stopOnHandleLine = self._dbgClient.handleLine.func_code
        else:
            stopOnHandleLine = self._dbgClient.handleLine.__code__

        frame.f_trace = self.trace_dispatch
        while frame.f_back is not None:
            # stop at eric's debugger frame or a threading bootstrap
            if frame.f_back.f_code == stopOnHandleLine:
                frame.f_trace = self.trace_dispatch
                break

            frame = frame.f_back

        self.stop_everywhere = True
        sys.settrace(self.trace_dispatch)
        sys.setprofile(self._dbgClient.callTraceEnabled) 
Example 3
Project: beacon-py   Author: deepsourcelabs   File: tracer.py    BSD 2-Clause "Simplified" License
def _trace(self, frame, event, arg):
        """The trace function which is passed to `sys.setprofile`.

        We only track the `call` event here, since we only need to
        identify the usage of code, and not actually do any profiling. For all
        other events, we exit without doing anything.
        """

        filename, lineno = frame.f_code.co_filename, frame.f_lineno

        if (self.stopped and sys.getprofile() == self._trace):
            sys.setprofile(None)
            return None

        if event == 'call' and self._should_capture(filename):
            self._capture(filename, lineno)

        return self._trace 
Example 4
Project: django-snoopy   Author: Pradeek   File: request.py    MIT License
def register_request(request, settings):
        snoopy_data = {
            'request': request.path,
            'method': request.method,
            'queries': [],
            'profiler_traces': [],
            'custom_attributes': {},
            'start_time': datetime.datetime.now()
        }
        _snoopy_request.request = request
        _snoopy_request.data = snoopy_data
        _snoopy_request.settings = settings
        _snoopy_request.current_function_key = [None, None]
        from django.conf import settings as django_settings
        _snoopy_request.relevant_apps = tuple(django_settings.INSTALLED_APPS)

        app_root = get_app_root()
        _snoopy_request.app_root = app_root

        if _snoopy_request.settings.get('USE_CPROFILE'):
            _snoopy_request.profiler = cProfile.Profile()
            _snoopy_request.profiler.enable()

        if _snoopy_request.settings.get('USE_BUILTIN_PROFILER'):
            sys.setprofile(SnoopyRequest.profile) 
Example 5
Project: django-snoopy   Author: Pradeek   File: request.py    MIT License
def register_response(response):
        if _snoopy_request.settings.get('USE_BUILTIN_PROFILER'):
            sys.setprofile(None)

        snoopy_data = _snoopy_request.data
        snoopy_data['end_time'] = datetime.datetime.now()
        snoopy_data['total_request_time'] = \
            (snoopy_data['end_time'] - snoopy_data['start_time'])

        if _snoopy_request.settings.get('USE_CPROFILE'):
            _snoopy_request.profiler.disable()
            profiler_result = StringIO.StringIO()
            profiler_stats = pstats.Stats(
                _snoopy_request.profiler, stream=profiler_result).sort_stats('cumulative')
            profiler_stats.print_stats()

            result = profiler_result.getvalue()
            if not _snoopy_request.settings.get('CPROFILE_SHOW_ALL_FUNCTIONS'):
                result = clean_profiler_result(result)
            snoopy_data['profiler_result'] = result
        return snoopy_data 
Example 6
Project: pyblish-win   Author: pyblish   File: profile.py    GNU Lesser General Public License v3.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 7
Project: pyblish-win   Author: pyblish   File: test_sys_setprofile.py    GNU Lesser General Public License v3.0
def setUp(self):
        sys.setprofile(None) 
Example 8
Project: pyblish-win   Author: pyblish   File: test_sys_setprofile.py    GNU Lesser General Public License v3.0
def tearDown(self):
        sys.setprofile(None) 
Example 9
Project: pyblish-win   Author: pyblish   File: test_sys_setprofile.py    GNU Lesser General Public License v3.0
def test_setget(self):
        def fn(*args):
            pass

        sys.setprofile(fn)
        self.assertIs(sys.getprofile(), fn) 
Example 10
Project: pyblish-win   Author: pyblish   File: test_sys_setprofile.py    GNU Lesser General Public License v3.0
def capture_events(callable, p=None):
    if p is None:
        p = HookWatcher()
    # Disable the garbage collector. This prevents __del__s from showing up in
    # traces.
    old_gc = gc.isenabled()
    gc.disable()
    try:
        sys.setprofile(p.callback)
        protect(callable, p)
        sys.setprofile(None)
    finally:
        if old_gc:
            gc.enable()
    return p.get_events()[1:-1] 
Example 11
Project: pyblish-win   Author: pyblish   File: test_sys.py    GNU Lesser General Public License v3.0
def test_getdefaultencoding(self):
        if test.test_support.have_unicode:
            self.assertRaises(TypeError, sys.getdefaultencoding, 42)
            # can't check more than the type, as the user might have changed it
            self.assertIsInstance(sys.getdefaultencoding(), str)

    # testing sys.settrace() is done in test_sys_settrace.py
    # testing sys.setprofile() is done in test_sys_setprofile.py 
Example 12
Project: pyblish-win   Author: pyblish   File: threading.py    GNU Lesser General Public License v3.0
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
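
As the docstring notes, threading.setprofile() only records the hook in _profile_hook; the threading machinery passes it to sys.setprofile() right before each subsequently started thread runs. A short illustrative usage sketch (ours, not part of the project above):

import threading

def hook(frame, event, arg):
    if event == "call":
        # Runs inside every thread started after the hook was registered.
        print(threading.current_thread().name, "->", frame.f_code.co_name)

threading.setprofile(hook)      # affects threads started from now on
worker = threading.Thread(target=sum, args=([1, 2, 3],), name="worker")
worker.start()
worker.join()
threading.setprofile(None)      # stop installing the hook in new threads
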
Example 13
Project: where   Author: kartverket   File: util.py    MIT License
def trace_source(frame, event, arg):
    """Show full trace for a given stack frame

    This function is mainly meant for debugging and can for instance be activated by inserting
    `sys.setprofile(util.trace_source)` in the source code where tracing should start. This function will then be called
    whenever a function is called or returned from. It is also possible to use `sys.settrace` in a similar way.

    Args:
        frame:             Current frame object.
        event (String):    Event, not used but needed to be compatible with sys.setprofile.
        arg:               Arg, not used but needed to be compatible with sys.setprofile.
    """
    caller = frame.f_back
    if caller is None:
        return

    file_name = caller.f_code.co_filename
    if "/where/" not in file_name:
        return

    if file_name not in _CACHE_SRC:
        with open(file_name, mode="r") as fid:
            _CACHE_SRC[file_name] = {no: ln.strip() for no, ln in enumerate(fid.readlines(), start=1)}

    line_no = caller.f_lineno
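    # "/erehw/" is "/where/" reversed: the slice keeps the path after the
    # last "/where/" and drops the ".py" suffix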
    module_name = file_name[-file_name[::-1].find("/erehw/") : -3].replace("/", ".")
    func_name = "{}.{} ({}):".format(module_name, caller.f_code.co_name, line_no)

    print("-> {:<40s} {}".format(func_name, _CACHE_SRC[file_name][line_no])) 
Example 14
Project: jawfish   Author: war-and-code   File: threading.py    MIT License
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 15
Project: NiujiaoDebugger   Author: MrSrc   File: profile.py    GNU General Public License v3.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec(cmd, globals, locals)
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 16
Project: NiujiaoDebugger   Author: MrSrc   File: test_sys_setprofile.py    GNU General Public License v3.0
def setUp(self):
        sys.setprofile(None) 
Example 17
Project: NiujiaoDebugger   Author: MrSrc   File: test_sys_setprofile.py    GNU General Public License v3.0
def tearDown(self):
        sys.setprofile(None) 
Example 18
Project: NiujiaoDebugger   Author: MrSrc   File: test_sys_setprofile.py    GNU General Public License v3.0
def test_setget(self):
        def fn(*args):
            pass

        sys.setprofile(fn)
        self.assertIs(sys.getprofile(), fn) 
Example 19
Project: NiujiaoDebugger   Author: MrSrc   File: test_sys_setprofile.py    GNU General Public License v3.0
def capture_events(callable, p=None):
    if p is None:
        p = HookWatcher()
    # Disable the garbage collector. This prevents __del__s from showing up in
    # traces.
    old_gc = gc.isenabled()
    gc.disable()
    try:
        sys.setprofile(p.callback)
        protect(callable, p)
        sys.setprofile(None)
    finally:
        if old_gc:
            gc.enable()
    return p.get_events()[1:-1] 
Example 20
Project: NiujiaoDebugger   Author: MrSrc   File: test_sys.py    GNU General Public License v3.0
def test_getdefaultencoding(self):
        self.assertRaises(TypeError, sys.getdefaultencoding, 42)
        # can't check more than the type, as the user might have changed it
        self.assertIsInstance(sys.getdefaultencoding(), str)

    # testing sys.settrace() is done in test_sys_settrace.py
    # testing sys.setprofile() is done in test_sys_setprofile.py 
Example 21
Project: NiujiaoDebugger   Author: MrSrc   File: threading.py    GNU General Public License v3.0
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 22
Project: codimension   Author: SergeySatskiy   File: clientbase_cdm_dbg.py    GNU General Public License v3.0
def setCallTrace(self, enabled):
        """Sets up the call trace"""
        if enabled:
            sys.setprofile(self.profile)
            self.callTraceEnabled = self.profile
        else:
            sys.setprofile(None)
            self.callTraceEnabled = None 
Example 23
Project: codimension   Author: SergeySatskiy   File: clientbase_cdm_dbg.py    GNU General Public License v3.0
def fork(self):
        """fork routine deciding which branch to follow"""
        # It does not make sense to follow something which was run via the
        # subprocess module. The subprocess module uses fork() internally,
        # so detect that case and automatically follow the parent even if it
        # was not explicitly requested.
        isPopen = False
        stackFrames = traceback.extract_stack()
        for stackFrame in stackFrames:
            if stackFrame[2] == '_execute_child':
                if stackFrame[0].endswith(os.path.sep + 'subprocess.py'):
                    isPopen = True

        if not self.forkAuto and not isPopen:
            sendJSONCommand(self.socket, METHOD_FORK_TO,
                            self.procuuid, None)
            self.eventLoop(True)
        pid = DEBUG_CLIENT_ORIG_FORK()

        if isPopen:
            # Switch to following parent
            oldFollow = self.forkChild
            self.forkChild = False

        if pid == 0:
            # child
            if not self.forkChild:
                sys.settrace(None)
                sys.setprofile(None)
                self.sessionClose(False)
        else:
            # parent
            if self.forkChild:
                sys.settrace(None)
                sys.setprofile(None)
                self.sessionClose(False)

        if isPopen:
            # Switch to what it was before
            self.forkChild = oldFollow
        return pid 
Example 24
Project: codimension   Author: SergeySatskiy   File: base_cdm_dbg.py    GNU General Public License v3.0
def bootstrap(self, target, args, kwargs):
        """Bootstraps a thread"""
        try:
            # The "base debug" function set up in the initial run method also
            # applies to the threads started afterwards.
            sys.settrace(self.trace_dispatch)

            target(*args, **kwargs)
        except Exception:
            excinfo = sys.exc_info()
            self.user_exception(excinfo, True)
        finally:
            sys.settrace(None)
            sys.setprofile(None) 
Example 25
Project: codimension   Author: SergeySatskiy   File: base_cdm_dbg.py    GNU General Public License v3.0
def set_continue(self, special):
        """Stops only on next breakpoint"""
        # Here we only set a new stop frame if it is a normal continue.
        if not special:
            self._set_stopinfo(None, None)

        # Disable tracing if not started in debug mode
        if not self._dbgClient.debugging:
            sys.settrace(None)
            sys.setprofile(None) 
Example 26
Project: beacon-py   Author: deepsourcelabs   File: tracer.py    BSD 2-Clause "Simplified" License
def start(self):
        """Start the tracer.

        Return a Python function that can be passed to `sys.setprofile`.
        """
        self.stopped = False
        if self.threading:
            if self.thread is None:
                self.thread = self.threading.currentThread()
            else:
                if self.thread.ident != self.threading.currentThread().ident:
                    return self._trace
        sys.setprofile(self._trace)
        return self._trace 
Example 27
Project: ironpython2   Author: IronLanguages   File: profile.py    Apache License 2.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 28
Project: ironpython2   Author: IronLanguages   File: test_sys_setprofile.py    Apache License 2.0
def setUp(self):
        sys.setprofile(None) 
Example 29
Project: ironpython2   Author: IronLanguages   File: test_sys_setprofile.py    Apache License 2.0
def tearDown(self):
        sys.setprofile(None) 
Example 30
Project: ironpython2   Author: IronLanguages   File: test_sys_setprofile.py    Apache License 2.0
def test_setget(self):
        def fn(*args):
            pass

        sys.setprofile(fn)
        self.assertIs(sys.getprofile(), fn) 
Example 31
Project: ironpython2   Author: IronLanguages   File: test_sys_setprofile.py    Apache License 2.0
def capture_events(callable, p=None):
    if p is None:
        p = HookWatcher()
    # Disable the garbage collector. This prevents __del__s from showing up in
    # traces.
    old_gc = gc.isenabled()
    gc.disable()
    try:
        sys.setprofile(p.callback)
        protect(callable, p)
        sys.setprofile(None)
    finally:
        if old_gc:
            gc.enable()
    return p.get_events()[1:-1] 
Example 32
Project: ironpython2   Author: IronLanguages   File: test_sys.py    Apache License 2.0
def test_getdefaultencoding(self):
        if test.test_support.have_unicode:
            self.assertRaises(TypeError, sys.getdefaultencoding, 42)
            # can't check more than the type, as the user might have changed it
            self.assertIsInstance(sys.getdefaultencoding(), str)

    # testing sys.settrace() is done in test_sys_settrace.py
    # testing sys.setprofile() is done in test_sys_setprofile.py 
Example 33
Project: ironpython2   Author: IronLanguages   File: threading.py    Apache License 2.0
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 34
Project: pygears   Author: bogdanvuk   File: gear_inst.py    MIT License
def __enter__(self):
        self.code_map.append(self.gear)

        # tracer is activated on next call, return or exception
        if registry('gear/current_module').parent == registry(
                'gear/hier_root'):
            sys.setprofile(self.tracer)

        return self 
Example 35
Project: pygears   Author: bogdanvuk   File: gear_inst.py    MIT License
def __exit__(self, exception_type, exception_value, traceback):
        if registry('gear/current_module').parent == registry(
                'gear/hier_root'):
            sys.setprofile(None)

        cm = self.code_map.pop()

        if exception_type is None:
            for name, val in cm.func_locals.items():
                if isinstance(val, Intf):
                    val.var_name = name 
Example 36
Project: Blockly-rduino-communication   Author: technologiescollege   File: profile.py    GNU General Public License v3.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec(cmd, globals, locals)
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 37
Project: Blockly-rduino-communication   Author: technologiescollege   File: test_sys_setprofile.py    GNU General Public License v3.0
def setUp(self):
        sys.setprofile(None) 
Example 38
Project: Blockly-rduino-communication   Author: technologiescollege   File: test_sys_setprofile.py    GNU General Public License v3.0
def tearDown(self):
        sys.setprofile(None) 
Example 39
Project: Blockly-rduino-communication   Author: technologiescollege   File: test_sys_setprofile.py    GNU General Public License v3.0
def test_setget(self):
        def fn(*args):
            pass

        sys.setprofile(fn)
        self.assertIs(sys.getprofile(), fn) 
Example 40
Project: Blockly-rduino-communication   Author: technologiescollege   File: test_sys_setprofile.py    GNU General Public License v3.0
def capture_events(callable, p=None):
    if p is None:
        p = HookWatcher()
    # Disable the garbage collector. This prevents __del__s from showing up in
    # traces.
    old_gc = gc.isenabled()
    gc.disable()
    try:
        sys.setprofile(p.callback)
        protect(callable, p)
        sys.setprofile(None)
    finally:
        if old_gc:
            gc.enable()
    return p.get_events()[1:-1] 
Example 41
Project: IronHydra   Author: microdee   File: profile.py    MIT License
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 42
Project: IronHydra   Author: microdee   File: threading.py    MIT License
def setprofile(func):
    global _profile_hook
    _profile_hook = func 
Example 43
Project: 2015cdb_g4   Author: 40223208   File: threading.py    GNU General Public License v3.0
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 44
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: profile.py    GNU General Public License v2.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec(cmd, globals, locals)
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 45
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_sys_setprofile.py    GNU General Public License v2.0
def setUp(self):
        sys.setprofile(None) 
Example 46
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_sys_setprofile.py    GNU General Public License v2.0
def tearDown(self):
        sys.setprofile(None) 
Example 47
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_sys_setprofile.py    GNU General Public License v2.0
def test_setget(self):
        def fn(*args):
            pass

        sys.setprofile(fn)
        self.assertIs(sys.getprofile(), fn) 
Example 48
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_sys_setprofile.py    GNU General Public License v2.0
def capture_events(callable, p=None):
    if p is None:
        p = HookWatcher()
    # Disable the garbage collector. This prevents __del__s from showing up in
    # traces.
    old_gc = gc.isenabled()
    gc.disable()
    try:
        sys.setprofile(p.callback)
        protect(callable, p)
        sys.setprofile(None)
    finally:
        if old_gc:
            gc.enable()
    return p.get_events()[1:-1] 
Example 49
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_sys.py    GNU General Public License v2.0
def test_getdefaultencoding(self):
        self.assertRaises(TypeError, sys.getdefaultencoding, 42)
        # can't check more than the type, as the user might have changed it
        self.assertIsInstance(sys.getdefaultencoding(), str)

    # testing sys.settrace() is done in test_sys_settrace.py
    # testing sys.setprofile() is done in test_sys_setprofile.py 
Example 50
Project: aws-lambda-runtime-pypy   Author: uscheller   File: profile.py    Apache License 2.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec(cmd, globals, locals)
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 51
Project: aws-lambda-runtime-pypy   Author: uscheller   File: threading.py    Apache License 2.0
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 52
Project: Computable   Author: ktraunmueller   File: profile.py    MIT License
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 53
Project: oss-ftp   Author: aliyun   File: profile.py    MIT License
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 54
Project: oss-ftp   Author: aliyun   File: test_sys_setprofile.py    MIT License
def setUp(self):
        sys.setprofile(None) 
Example 55
Project: oss-ftp   Author: aliyun   File: test_sys_setprofile.py    MIT License
def tearDown(self):
        sys.setprofile(None) 
Example 56
Project: oss-ftp   Author: aliyun   File: test_sys_setprofile.py    MIT License
def test_setget(self):
        def fn(*args):
            pass

        sys.setprofile(fn)
        self.assertIs(sys.getprofile(), fn) 
Example 57
Project: oss-ftp   Author: aliyun   File: test_sys_setprofile.py    MIT License
def capture_events(callable, p=None):
    if p is None:
        p = HookWatcher()
    # Disable the garbage collector. This prevents __del__s from showing up in
    # traces.
    old_gc = gc.isenabled()
    gc.disable()
    try:
        sys.setprofile(p.callback)
        protect(callable, p)
        sys.setprofile(None)
    finally:
        if old_gc:
            gc.enable()
    return p.get_events()[1:-1] 
Example 58
Project: oss-ftp   Author: aliyun   File: test_sys.py    MIT License
def test_getdefaultencoding(self):
        if test.test_support.have_unicode:
            self.assertRaises(TypeError, sys.getdefaultencoding, 42)
            # can't check more than the type, as the user might have changed it
            self.assertIsInstance(sys.getdefaultencoding(), str)

    # testing sys.settrace() is done in test_sys_settrace.py
    # testing sys.setprofile() is done in test_sys_setprofile.py 
Example 59
Project: oss-ftp   Author: aliyun   File: threading.py    MIT License
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 60
Project: FightstickDisplay   Author: calexil   File: __init__.py    GNU General Public License v3.0
def _install_trace():
    global _trace_thread_count
    sys.setprofile(_thread_trace_func(_trace_thread_count))
    _trace_thread_count += 1 
Example 61
Project: setup   Author: mindbender-studio   File: profile.py    MIT License
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec(cmd, globals, locals)
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 62
Project: setup   Author: mindbender-studio   File: threading.py    MIT License
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 63
Project: 2015wcm   Author: coursemdetw   File: threading.py    GNU Affero General Public License v3.0
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 64
Project: pmatic   Author: LarsMichelsen   File: threading.py    GNU General Public License v2.0
def setprofile(func):
    global _profile_hook
    _profile_hook = func 
Example 65
Project: godot-zeronet-plugin   Author: zam-org   File: profile.py    GNU General Public License v2.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 66
Project: godot-zeronet-plugin   Author: zam-org   File: threading.py    GNU General Public License v2.0
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.

    """
    global _profile_hook
    _profile_hook = func 
Example 67
Project: godot-zeronet-plugin   Author: zam-org   File: profile.py    GNU General Public License v2.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 68
Project: godot-zeronet-plugin   Author: zam-org   File: profile.py    GNU General Public License v2.0
def runctx(self, cmd, globals, locals):
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec cmd in globals, locals
        finally:
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call. 
Example 69
Project: pyblish-win   Author: pyblish   File: profile.py    GNU Lesser General Public License v3.0
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
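
The calibration discussed in the comment above is exposed by the standard profile module as Profile.calibrate(); the following sketch shows how it is typically applied (the repetition count of 100000 is an arbitrary choice, and larger counts give a more stable estimate):

import profile

pr = profile.Profile()
bias = pr.calibrate(100000)       # measure the per-event overhead on this machine
print("measured bias:", bias)

# Apply the correction globally to profilers created afterwards:
profile.Profile.bias = bias
# Or pass it to a single instance:
pr2 = profile.Profile(bias=bias)
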
Example 70
Project: NiujiaoDebugger   Author: MrSrc   File: profile.py    GNU General Public License v3.0
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
Example 71
Project: ironpython2   Author: IronLanguages   File: profile.py    Apache License 2.0
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
Example 72
Project: Blockly-rduino-communication   Author: technologiescollege   File: profile.py    GNU General Public License v3.0
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
Example 73
Project: IronHydra   Author: microdee   File: profile.py    MIT License
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
Example 74
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: profile.py    GNU General Public License v2.0
def runcall(*args, **kw):
        if len(args) >= 2:
            self, func, *args = args
        elif not args:
            raise TypeError("descriptor 'runcall' of 'Profile' object "
                            "needs an argument")
        elif 'func' in kw:
            func = kw.pop('func')
            self, *args = args
        else:
            raise TypeError('runcall expected at least 1 positional argument, '
                            'got %d' % (len(args)-1))

        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
Example 75
Project: aws-lambda-runtime-pypy   Author: uscheller   File: profile.py    Apache License 2.0
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
Example 76
Project: Computable   Author: ktraunmueller   File: profile.py    MIT License
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
Example 77
Project: oss-ftp   Author: aliyun   File: profile.py    MIT License
def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
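For orientation, here is a short usage sketch (not from oss-ftp itself) of driving this runcall() wrapper and reading the results back with pstats; work() is a made-up stand-in for real code.

import profile
import pstats

def work(n):
    return sum(i * i for i in range(n))

pr = profile.Profile()
result = pr.runcall(work, 100000)   # only this call is profiled
pstats.Stats(pr).sort_stats("cumulative").print_stats(10)

Because the hook is installed and removed inside runcall() itself, nothing outside the wrapped call shows up in the statistics.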
Example 78
Project: setup   Author: mindbender-studio   File: profile.py    MIT License 4 votes vote down vote up
    def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
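The calibration this comment block describes is exposed on profile.Profile as the calibrate() method and the bias constructor argument. A rough sketch of the workflow follows; the iteration count and the profiled expression are arbitrary choices.

import profile

# Measure the per-event overhead on a throwaway instance; larger
# iteration counts give a steadier figure.
bias = profile.Profile().calibrate(100000)
print("per-event bias:", bias)

# Subtract the measured overhead from subsequent runs.  As the comment
# warns, an overly large bias can make some functions' internal times
# come out negative.
pr = profile.Profile(bias=bias)
pr.run("sum(i * i for i in range(200000))")
pr.print_stats()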
Example 79
Project: godot-zeronet-plugin   Author: zam-org   File: profile.py    GNU General Public License v2.0 4 votes vote down vote up
    def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #************************************************************** 
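One detail worth noting about the pattern above: the finally clause clears the hook with sys.setprofile(None) rather than restoring whatever was installed before, so a runcall() nested under another profiler silently disables it. A hedged sketch of a restore-friendly variant is shown below; run_with_profiler is a hypothetical helper, not part of profile.py.

import sys

def run_with_profiler(dispatcher, func, *args, **kw):
    # Save the currently installed profile hook (if any) and put it back
    # afterwards, instead of unconditionally clearing it.
    previous = sys.getprofile()
    sys.setprofile(dispatcher)
    try:
        return func(*args, **kw)
    finally:
        sys.setprofile(previous)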
Example 80
Project: godot-zeronet-plugin   Author: zam-org   File: profile.py    GNU General Public License v2.0 4 votes vote down vote up
    def runcall(self, func, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value is specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each function.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
    # low "value added" feature.)
    #**************************************************************
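Finally, note that cProfile.Profile offers the same runcall() interface with far lower per-event overhead, because its hook runs in C, so the calibration machinery discussed above is normally not needed there. A brief sketch with an illustrative workload:

import cProfile
import pstats

def workload():
    # Illustrative work; any pure-Python function would do here.
    return sorted(str(i) for i in range(50000))

pr = cProfile.Profile()
pr.runcall(workload)
pstats.Stats(pr).sort_stats("tottime").print_stats(10)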