Python functools.lru_cache() Examples

The following are code examples for showing how to use functools.lru_cache(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: tagged   Author: jviide   File: __init__.py    MIT License 6 votes vote down vote up
def tag(func=None, *, cache_maxsize=128):
    """Decorator turning *func* into a template tag.

    Splitting of the template string is memoized with an LRU cache of
    *cache_maxsize* entries. Usable bare (``@tag``) or with arguments
    (``@tag(cache_maxsize=...)``).
    """
    memoized_split = functools.lru_cache(cache_maxsize)(split)

    def decorate(target):
        @functools.wraps(target)
        def tagged(string):
            strings, exprs = memoized_split(string, compile_exprs=True)

            # Evaluate the embedded expressions in the *caller's* scope;
            # drop the stack list promptly to avoid keeping frames alive.
            frames = inspect.stack()
            caller_globals = frames[1].frame.f_globals
            caller_locals = frames[1].frame.f_locals
            del frames

            evaluated = tuple(
                eval(code, caller_globals, caller_locals) for code in exprs
            )
            return target(strings, evaluated)

        return tagged

    if func:
        return decorate(func)
    return decorate
Example 2
Project: RTX   Author: RTXteam   File: CachedMethods.py    MIT License 6 votes vote down vote up
def register(method):
    """
    Put the lru_cache enabled method (or function) into `cached_methods`

    :param method: the method (or function) wrapped by `@functools.lru_cache`
                   that you want to be managed by this module
    :return: the (possibly cache-wrapped) method
    """
    global enabled, cached_methods, lru_cache_setting

    # Caching disabled: hand the method back untouched.
    if not enabled:
        return method

    wrapped = lru_cache(**lru_cache_setting)(method)
    cached_methods.append(wrapped)
    return wrapped
Example 3
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 6 votes vote down vote up
def test_lru_type_error(self):
        """lru_cache must raise TypeError for unhashable args without leaking."""
        # Regression test for issue #28653.
        # lru_cache was leaking when one of the arguments
        # wasn't cacheable.

        @functools.lru_cache(maxsize=None)
        def infinite_cache(o):
            pass

        @functools.lru_cache(maxsize=10)
        def limited_cache(o):
            pass

        # A list argument is unhashable, so the cache key cannot be built.
        with self.assertRaises(TypeError):
            infinite_cache([])

        with self.assertRaises(TypeError):
            limited_cache([])
Example 4
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 6 votes vote down vote up
def test_need_for_rlock(self):
        """A reentrant call into the same cached function must not deadlock."""
        # This will deadlock on an LRU cache that uses a regular lock

        @self.module.lru_cache(maxsize=10)
        def test_func(x):
            'Used to demonstrate a reentrant lru_cache call within a single thread'
            return x

        class DoubleEq:
            'Demonstrate a reentrant lru_cache call within a single thread'
            def __init__(self, x):
                self.x = x
            def __hash__(self):
                return self.x
            def __eq__(self, other):
                # Re-enter the cached function from inside key comparison.
                if self.x == 2:
                    test_func(DoubleEq(1))
                return self.x == other.x

        test_func(DoubleEq(1))                      # Load the cache
        test_func(DoubleEq(2))                      # Load the cache
        self.assertEqual(test_func(DoubleEq(2)),    # Trigger a re-entrant __eq__ call
                         DoubleEq(2))               # Verify the correct return value
Example 5
Project: kucher   Author: Zubax   File: textual.py    GNU General Public License v3.0 6 votes vote down vote up
def _display_value_impl(value: typing.Union[type(None), str, bytes, int, float, bool, tuple],
                        type_id: Register.ValueType) -> str:
    """Render a register value as text.

    The value must be hashable (lists are converted to tuples upstream) because
    this implementation sits behind an lru_cache whose size should be a power
    of two. The cache is deliberately not ``typed``: the type information is
    already conveyed by *type_id*, and keying on argument type as well would
    only dilute cache efficiency.
    """
    if value is None:
        return ''

    if isinstance(value, (str, bytes)):
        return str(value)

    # Normalize a lone scalar into a one-element sequence.
    if isinstance(value, (int, float, bool)):
        value = [value]

    # A single non-float entry needs no dtype-aware formatting.
    if len(value) == 1 and not isinstance(value[0], float):
        return str(value[0])

    dtype = Register.get_numpy_type(type_id)
    if dtype is None:
        raise ValueError(f'Unknown type ID: {type_id!r}')

    return _display_array_of_scalars(value, dtype)
Example 6
Project: sawtooth-core   Author: hyperledger   File: state_view.py    Apache License 2.0 6 votes vote down vote up
def lru_cached_method(*lru_args, **lru_kwargs):
    """Decorator factory: LRU-cache an instance method per instance.

    The cache closes over a weak reference to ``self`` rather than ``self``
    itself, so the cache does not create a reference cycle that keeps the
    instance alive after all external references are gone.
    """
    def decorator(wrapped_fn):
        @wraps(wrapped_fn)
        def wrapped(self, *args, **kwargs):
            # Weakly reference self to avoid a self-referencing cycle that
            # would fool the garbage collector.
            self_ref = weakref.ref(self)

            @wraps(wrapped_fn)
            @lru_cache(*lru_args, **lru_kwargs)
            def cached(*call_args, **call_kwargs):
                return wrapped_fn(self_ref(), *call_args, **call_kwargs)

            # Shadow the class attribute with the per-instance cache so every
            # later call goes straight to `cached`.
            setattr(self, wrapped_fn.__name__, cached)
            return cached(*args, **kwargs)

        return wrapped

    return decorator
Example 7
Project: sawtooth-core   Author: hyperledger   File: settings_view.py    Apache License 2.0 6 votes vote down vote up
def lru_cached_method(*lru_args, **lru_kwargs):
    """Build a decorator that adds a per-instance LRU cache to a method.

    ``self`` is captured only through a weak reference so the memoized
    closure cannot keep the instance alive once all outside references
    to it are dropped.
    """
    def decorator(wrapped_fn):
        @wraps(wrapped_fn)
        def wrapped(self, *args, **kwargs):
            # weakref breaks the self-reference cycle that would otherwise
            # confuse the garbage collector.
            weak_self = weakref.ref(self)

            @wraps(wrapped_fn)
            @lru_cache(*lru_args, **lru_kwargs)
            def cached(*inner_args, **inner_kwargs):
                return wrapped_fn(weak_self(), *inner_args, **inner_kwargs)

            # Replace the bound name on this instance; subsequent lookups
            # hit the cache directly instead of re-entering `wrapped`.
            setattr(self, wrapped_fn.__name__, cached)
            return cached(*args, **kwargs)

        return wrapped

    return decorator
Example 8
Project: banruo   Author: yingshang   File: trans_real.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def check_for_language(lang_code):
    """
    Check whether there is a global language file for the given language
    code, to decide whether a user-provided language is available.

    lru_cache should have a maxsize to prevent memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    # Quick well-formedness check first (#21458); also rejects None.
    if lang_code is None or not language_code_re.search(lang_code):
        return False

    locale = to_locale(lang_code)
    for path in all_locale_paths():
        if gettext_module.find('django', path, [locale]) is not None:
            return True
    return False
Example 9
Project: py-helios-node   Author: Helios-Protocol   File: transactions.py    MIT License 6 votes vote down vote up
def validate_transaction_signature(transaction: Union[BaseTransaction, BaseReceiveTransaction], return_sender = False) -> None:
    """Validate the ECDSA signature of *transaction*; raise ValidationError on failure.

    NOTE(review): when return_sender is True this returns the sender's
    canonical address, so the ``-> None`` annotation is inaccurate — confirm
    and fix at the project level.
    """
    v = extract_signature_v(transaction.v)

    # Ethereum-style recovery id: v of 27/28 maps to canonical 0/1.
    canonical_v = v - 27
    vrs = (canonical_v, transaction.r, transaction.s)
    signature = keys.Signature(vrs=vrs)

    message = transaction.get_message_for_signing()
    
    try:
        public_key = signature.recover_public_key_from_msg(message)
    except BadSignature as e:
        raise ValidationError("Bad Signature: {0}".format(str(e)))

    if not signature.verify_msg(message, public_key):
        raise ValidationError("Invalid Signature")

    if return_sender:
        return public_key.to_canonical_address()

#@lru_cache(maxsize=32) 
Example 10
Project: py-helios-node   Author: Helios-Protocol   File: node_score.py    MIT License 6 votes vote down vote up
def validate_node_staking_score_signature(node_staking_score: 'NodeStakingScore', return_sender = False) -> None:
    """Validate the ECDSA signature of *node_staking_score*; raise ValidationError on failure.

    NOTE(review): when return_sender is True this returns the sender's
    canonical address, so the ``-> None`` annotation is inaccurate — confirm
    and fix at the project level.
    """
    v = extract_signature_v(node_staking_score.v)


    # Ethereum-style recovery id: v of 27/28 maps to canonical 0/1.
    canonical_v = v - 27
    vrs = (canonical_v, node_staking_score.r, node_staking_score.s)
    signature = keys.Signature(vrs=vrs)

    message = node_staking_score.get_message_for_signing()
    
    try:
        public_key = signature.recover_public_key_from_msg(message)
    except BadSignature as e:
        raise ValidationError("Bad Signature: {0}".format(str(e)))

    if not signature.verify_msg(message, public_key):
        raise ValidationError("Invalid Signature")

    if return_sender:
        return public_key.to_canonical_address()

#@lru_cache(maxsize=32) 
Example 11
Project: py-helios-node   Author: Helios-Protocol   File: timed_cache.py    MIT License 6 votes vote down vote up
def timed_cache(**timedelta_kwargs):
    """Decorator factory: unbounded lru_cache flushed every timedelta(**timedelta_kwargs)."""
    def _wrapper(f):
        refresh_interval = timedelta(**timedelta_kwargs)
        # Start in the past so the very first call triggers a (harmless) flush.
        expires_at = datetime.utcnow() - refresh_interval
        # Unbounded cache; boundedness comes from the periodic flush instead.
        f = functools.lru_cache(None)(f)

        @functools.wraps(f)
        def _wrapped(*args, **kwargs):
            nonlocal expires_at
            now = datetime.utcnow()
            if now >= expires_at:
                f.cache_clear()
                expires_at = now + refresh_interval
            return f(*args, **kwargs)

        return _wrapped

    return _wrapper
Example 12
Project: py-helios-node   Author: Helios-Protocol   File: timed_cache.py    MIT License 6 votes vote down vote up
def async_timed_cache(**timedelta_kwargs):
    """Decorator factory: cache an async function's results, flushing the
    cache every timedelta(**timedelta_kwargs).

    Fixes two defects in the previous version:
    1. `_wrapper` was declared `async def`, so applying the decorator returned
       a coroutine object instead of the wrapped function.
    2. `functools.lru_cache` around a coroutine function caches *coroutine
       objects*, and a cached coroutine cannot be awaited a second time
       (RuntimeError). We therefore cache awaited results in a plain dict.
    """
    def _wrapper(f):
        update_delta = timedelta(**timedelta_kwargs)
        # Start in the past so the first call performs a (no-op) flush.
        next_update = datetime.utcnow() - update_delta
        cache = {}

        @functools.wraps(f)
        async def _wrapped(*args, **kwargs):
            nonlocal next_update
            now = datetime.utcnow()
            if now >= next_update:
                cache.clear()
                next_update = now + update_delta
            # Key mirrors lru_cache semantics closely enough for kwargs use.
            key = (args, tuple(sorted(kwargs.items())))
            if key not in cache:
                cache[key] = await f(*args, **kwargs)
            return cache[key]

        return _wrapped

    return _wrapper
Example 13
Project: pyblish-win   Author: pyblish   File: _compat.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def callable(obj):
    """Backport of callable(): true when any class in the MRO defines __call__."""
    for klass in type(obj).__mro__:
        if "__call__" in klass.__dict__:
            return True
    return False


# --- stdlib additions


# py 3.2 functools.lru_cache
# Taken from: http://code.activestate.com/recipes/578078
# Credit: Raymond Hettinger 
Example 14
Project: ieml   Author: IEMLdev   File: parser.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self):
        """Build the morpheme parser: install lexer rules and the pickled yacc tables."""
        self.t_add_rules()

        self.lexer = get_script_lexer()
        self.parser = yacc.yacc(module=self, errorlog=logging, start='term',
                                debug=False, optimize=True,
                                picklefile=os.path.join(PARSER_FOLDER, "morpheme_parser.pickle"))
        # rename the parsing method (can't name it directly `parse` because of
        # lru_cache interacting badly with ply's introspection checks)
        self.parse = self.t_parse
Example 15
Project: ieml   Author: IEMLdev   File: parser.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self):
        """Build the path parser: construct the lexer and the pickled yacc tables."""

        # Build the lexer and parser
        self.lexer = get_lexer()
        self.parser = yacc.yacc(module=self, errorlog=logging, start='path',
                                debug=False, optimize=True, picklefile="parser/path_parser.pickle")
        # rename the parsing method (can't name it directly `parse` because of
        # lru_cache interacting badly with ply's introspection checks)
        self.parse = self.t_parse
Example 16
Project: bigquerylayers   Author: smandaric   File: func.py    GNU General Public License v3.0 5 votes vote down vote up
def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm. A `maxsize` of None selects an unbounded cache.

    """
    backing = _UnboundCache() if maxsize is None else LRUCache(maxsize)
    return _cache(backing, typed)
Example 17
Project: planb   Author: ossobv   File: zfs.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self, *args, **kwargs):
        """Initialize the Zfs backend and memoize zfs property lookups per instance."""
        super().__init__(*args, **kwargs)
        self.poolname = self.config['POOLNAME']
        # Create LRU cache for this instance of Zfs. Wrapping the *bound*
        # method keeps the cache per-instance instead of class-wide.
        self.zfs_get_property = lru_cache(maxsize=32)(self.zfs_get_property)
Example 18
Project: zorg   Author: Amsterdam   File: views.py    Mozilla Public License 2.0 5 votes vote down vote up
def _swagger_yml():
    """Swagger yaml file contents.

    Callers memoize this with lru_cache so it behaves as a singleton.
    """
    directory = os.path.dirname(os.path.abspath(__file__))
    path = '{}/openapi.yml'.format(directory)
    with open(path) as file:
        return file.read()
Example 19
Project: zorg   Author: Amsterdam   File: elastic.py    Mozilla Public License 2.0 5 votes vote down vote up
def _elasticsearch():
    """Elasticsearch instance (memoized by lru_cache at the call site).

    Connection pooling is left to the Elasticsearch client library itself.

    :see: https://elasticsearch-py.readthedocs.io/en/master/#persistent-connections
    """
    hosts = settings.ELASTIC_SEARCH_HOSTS
    return Elasticsearch(hosts=hosts, retry_on_timeout=True, refresh=True)
Example 20
Project: where   Author: kartverket   File: cache.py    MIT License 5 votes vote down vote up
def function(func):
    """Cache a given function call.

    Wraps functools.lru_cache (Least Recently Used) so the project has one
    consistent lib.cache entry point where cache parameters could be tuned.
    """
    memoized = functools.lru_cache()(func)
    return memoized
Example 21
Project: pyparadigm   Author: KnorrFG   File: checkerboard.py    MIT License 5 votes vote down vote up
def render_frame(screen, frame):
    """Draw *frame* directly onto *screen* and flip the display."""
    # This time we don't use :py:func:`misc.display` and instead draw directly
    # onto the screen, and call flip() then to display it. Usually we would want
    # to generate a screen with a function (with lru_cache), and then use
    # :py:func:`misc.display` to blit the different screens. This way every
    # screen is only computed once. This time though, no screens are computed,
    # it is simply displaying an existing image, and no screens are reused.
    compose(screen)(Surface(scale=1)(frame))
    pygame.display.flip()
Example 22
Project: versions.boop.ws   Author: duckinator   File: __init__.py    MIT License 5 votes vote down vote up
def run(cmd):
    """Echo *cmd*, execute it, and return its stripped stdout."""
    print('$', cmd)
    output = check_output(shlex.split(cmd)).decode().strip()
    print(output)
    print()
    return output


# We apply memoize() to is_linux() to avoid running the same command repeatedly.
Example 23
Project: versions.boop.ws   Author: duckinator   File: __init__.py    MIT License 5 votes vote down vote up
def is_linux():
    """True when `uname -s` reports a Linux kernel."""
    kernel = run('uname -s')
    return kernel == 'Linux'


# We apply memoize() to is_freebsd() to avoid running the same command repeatedly.
Example 24
Project: python-podman   Author: containers   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def cached_property(fn):
    """Decorate property to cache return value (keyed on the instance, up to 8 entries)."""
    memoized = functools.lru_cache(maxsize=8)(fn)
    return property(memoized)
Example 25
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_hash_only_once(self):
        """Each use of an argument must trigger exactly one __hash__ call."""
        # To protect against weird reentrancy bugs and to improve
        # efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will only call __hash__
        # only once per use as an argument to the cached function.

        @self.module.lru_cache(maxsize=1)
        def f(x, y):
            return x * 3 + y

        # Simulate the integer 5
        mock_int = unittest.mock.Mock()
        mock_int.__mul__ = unittest.mock.Mock(return_value=15)
        mock_int.__hash__ = unittest.mock.Mock(return_value=999)

        # Add to cache:  One use as an argument gives one call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 1)
        self.assertEqual(f.cache_info(), (0, 1, 1, 1))

        # Cache hit: One use as an argument gives one additional call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 2)
        self.assertEqual(f.cache_info(), (1, 1, 1, 1))

        # Cache eviction: No use as an argument gives no additional call
        self.assertEqual(f(6, 2), 20)
        self.assertEqual(mock_int.__hash__.call_count, 2)
        self.assertEqual(f.cache_info(), (1, 2, 1, 1))

        # Cache miss: One use as an argument gives one additional call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 3)
        self.assertEqual(f.cache_info(), (1, 3, 1, 1))
Example 26
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_reentrancy_with_len(self):
        """Caching builtins.len itself must not break the LRU internals."""
        # Test to make sure the LRU cache code isn't thrown-off by
        # caching the built-in len() function.  Since len() can be
        # cached, we shouldn't use it inside the lru code itself.
        old_len = builtins.len
        try:
            builtins.len = self.module.lru_cache(4)(len)
            for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
                self.assertEqual(len('abcdefghijklmn'[:i]), i)
        finally:
            # Always restore the real len, even if an assertion failed.
            builtins.len = old_len
Example 27
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_star_arg_handling(self):
        """f(1, 2) and f((1, 2)) must be distinct cache keys for a *args function."""
        # Test regression that arose in ea064ff3c10f
        @functools.lru_cache()
        def f(*args):
            return args

        self.assertEqual(f(1, 2), (1, 2))
        self.assertEqual(f((1, 2)), ((1, 2),))
Example 28
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_with_maxsize_none(self):
        """Unbounded cache: recursive fib yields exact hit/miss counts; clear resets them."""
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n-1) + fib(n-2)
        self.assertEqual([fib(n) for n in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
Example 29
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_with_exceptions(self):
        """User exceptions pass through unchained and are never cached."""
        # Verify that user_function exceptions get passed through without
        # creating a hard-to-read chained exception.
        # http://bugs.python.org/issue13177
        for maxsize in (None, 128):
            @self.module.lru_cache(maxsize)
            def func(i):
                return 'abc'[i]
            self.assertEqual(func(0), 'a')
            with self.assertRaises(IndexError) as cm:
                func(15)
            self.assertIsNone(cm.exception.__context__)
            # Verify that the previous exception did not result in a cached entry
            with self.assertRaises(IndexError):
                func(15)
Example 30
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_with_types(self):
        """typed=True must key 3 and 3.0 separately (4 misses, then 4 hits)."""
        for maxsize in (None, 128):
            @self.module.lru_cache(maxsize=maxsize, typed=True)
            def square(x):
                return x * x
            self.assertEqual(square(3), 9)
            self.assertEqual(type(square(3)), type(9))
            self.assertEqual(square(3.0), 9.0)
            self.assertEqual(type(square(3.0)), type(9.0))
            self.assertEqual(square(x=3), 9)
            self.assertEqual(type(square(x=3)), type(9))
            self.assertEqual(square(x=3.0), 9.0)
            self.assertEqual(type(square(x=3.0)), type(9.0))
            self.assertEqual(square.cache_info().hits, 4)
            self.assertEqual(square.cache_info().misses, 4)
Example 31
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_with_keyword_args(self):
        """Keyword-argument calls hit the cache with the default maxsize of 128."""
        @self.module.lru_cache()
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual(
            [fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
        )
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
Example 32
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_with_keyword_args_maxsize_none(self):
        """Keyword-argument calls hit an unbounded (maxsize=None) cache correctly."""
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual([fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
Example 33
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_kwargs_order(self):
        """f(a=1, b=2) and f(b=2, a=1) are distinct keys: kwargs order is preserved."""
        # PEP 468: Preserving Keyword Argument Order
        @self.module.lru_cache(maxsize=10)
        def f(**kwargs):
            return list(kwargs.items())
        self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
        self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
        self.assertEqual(f.cache_info(),
            self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
Example 34
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_cache_threaded2(self):
        """n threads calling f with the same arguments must produce n misses per round, 0 hits."""
        # Simultaneous call with the same arguments
        n, m = 5, 7
        # Barriers synchronize the main thread with all n workers each round.
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            # Hold every thread inside the cached call until all have entered.
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
Example 35
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_cache_threaded3(self):
        """Concurrent calls with overlapping args on a tiny cache still return correct values."""
        @self.module.lru_cache(maxsize=2)
        def f(x):
            # Slow body widens the window for thread interleaving.
            time.sleep(.01)
            return 3 * x
        def test(i, x):
            with self.subTest(thread=i):
                self.assertEqual(f(x), 3 * x, i)
        threads = [threading.Thread(target=test, args=(i, v))
                   for i, v in enumerate([1, 2, 2, 3, 2])]
        with support.start_threads(threads):
            pass
Example 36
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_early_detection_of_bad_call(self):
        """Applying lru_cache without calling it must fail fast with TypeError."""
        # Issue #22184
        with self.assertRaises(TypeError):
            @functools.lru_cache
            def f():
                pass
Example 37
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_copy(self):
        """copy.copy of a cached function/method/partial returns the same object."""
        cls = self.__class__
        def orig(x, y):
            return 3 * x + y
        part = self.module.partial(orig, 2)
        funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
                 self.module.lru_cache(2)(part))
        for f in funcs:
            with self.subTest(func=f):
                f_copy = copy.copy(f)
                self.assertIs(f_copy, f)
Example 38
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_deepcopy(self):
        """copy.deepcopy of a cached function/method/partial returns the same object."""
        cls = self.__class__
        def orig(x, y):
            return 3 * x + y
        part = self.module.partial(orig, 2)
        funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
                 self.module.lru_cache(2)(part))
        for f in funcs:
            with self.subTest(func=f):
                f_copy = copy.deepcopy(f)
                self.assertIs(f_copy, f)
Example 39
Project: NiujiaoDebugger   Author: MrSrc   File: test_inspect.py    GNU General Public License v3.0 5 votes vote down vote up
def test_unwrap_one(self):
        """inspect.unwrap must peel a single lru_cache wrapper back to the original."""
        def func(a, b):
            return a + b
        wrapper = functools.lru_cache(maxsize=20)(func)
        self.assertIs(inspect.unwrap(wrapper), func)
Example 40
Project: NiujiaoDebugger   Author: MrSrc   File: typing.py    GNU General Public License v3.0 5 votes vote down vote up
def _tp_cache(func):
    """Internal wrapper caching __getitem__ of generic types with a fallback to
    original function for non-hashable arguments.
    """
    cached = functools.lru_cache()(func)
    # Register the clear hook so test teardown can flush the cache.
    _cleanups.append(cached.cache_clear)

    @functools.wraps(func)
    def inner(*args, **kwds):
        try:
            return cached(*args, **kwds)
        except TypeError:
            # Unhashable arguments: fall through to the uncached call below,
            # which re-raises any genuine error.
            pass
        return func(*args, **kwds)

    return inner
Example 41
Project: sympad   Author: Pristine-Cat   File: test.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
# Thin forwarding wrapper that delegates AST-to-Python rendering to `sym`.
def ast2py (ast): return sym.ast2py (ast)

# @lru_cache (maxsize = None)
# def ast2spt (ast): return sym.ast2spt (ast)
Example 42
Project: FX-RER-Value-Extraction   Author: tsKenneth   File: backend_bases.py    MIT License 5 votes vote down vote up
def _fix_ipython_backend2gui(cls):
        """Patch IPython's backend->GUI-toolkit mapping for this canvas class.

        Safe no-op when IPython is absent, not running, or lacks the APIs.
        """
        # Fix hard-coded module -> toolkit mapping in IPython (used for
        # `ipython --auto`).  This cannot be done at import time due to
        # ordering issues, so we do it when creating a canvas, and should only
        # be done once per class (hence the `lru_cache(1)`).
        if "IPython" not in sys.modules:
            return
        import IPython
        ip = IPython.get_ipython()
        if not ip:
            return
        from IPython.core import pylabtools as pt
        if (not hasattr(pt, "backend2gui")
                or not hasattr(ip, "enable_matplotlib")):
            # In case we ever move the patch to IPython and remove these APIs,
            # don't break on our side.
            return
        backend_mod = sys.modules[cls.__module__]
        rif = getattr(backend_mod, "required_interactive_framework", None)
        backend2gui_rif = {"qt5": "qt", "qt4": "qt", "gtk3": "gtk3",
                           "wx": "wx", "macosx": "osx"}.get(rif)
        if backend2gui_rif:
            pt.backend2gui[get_backend()] = backend2gui_rif
            # Work around pylabtools.find_gui_and_backend always reading from
            # rcParamsOrig.
            orig_origbackend = mpl.rcParamsOrig["backend"]
            try:
                mpl.rcParamsOrig["backend"] = mpl.rcParams["backend"]
                ip.enable_matplotlib()
            finally:
                mpl.rcParamsOrig["backend"] = orig_origbackend
Example 43
Project: htm.py   Author: jviide   File: __init__.py    MIT License 5 votes vote down vote up
def htm(func=None, *, cache_maxsize=128):
    """Decorator building an htm tag function.

    Template parsing is memoized with an LRU cache of *cache_maxsize* entries.
    Usable bare (``@htm``) or parameterized (``@htm(cache_maxsize=...)``).
    """
    memoized_parse = functools.lru_cache(maxsize=cache_maxsize)(htm_parse)

    def decorate(h):
        @tag
        @functools.wraps(h)
        def tagged(strings, values):
            ops = memoized_parse(strings)
            return htm_eval(h, ops, values)

        return tagged

    if func is not None:
        return decorate(func)
    return decorate
Example 44
Project: aws-cfn-plex   Author: lordmuffin   File: ipaddress.py    MIT License 5 votes vote down vote up
def isinstance(val, types):
    """Python 2 shim: accept `long` wherever `int` is requested, then defer to builtin isinstance."""
    if types is int:
        types = (int, long)
    elif type(types) is tuple and int in types:
        # Widen an explicit tuple that mentions int to also allow long.
        types += (long,)
    return _builtin_isinstance(val, types)

# functools.lru_cache is Python 3.2+ only.
# /@functools.lru_cache()/d

# int().to_bytes is Python 3.2+ only.
# s/\(\w+\)\.to_bytes(/_int_to_bytes(\1, / 
Example 45
Project: filesystem_spec   Author: intake   File: caching.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, blocksize, fetcher, size, maxblocks=32):
        """Set up block-wise caching: at most *maxblocks* fetched blocks are kept (LRU)."""
        super().__init__(blocksize, fetcher, size)
        # Number of blocks needed to cover the whole file, rounding up.
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        # Per-instance memoization of block fetches, bounded to maxblocks entries.
        self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block)
Example 46
Project: filesystem_spec   Author: intake   File: caching.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __setstate__(self, state):
        self.__dict__.update(state)
        self._fetch_block_cached = functools.lru_cache(state["maxblocks"])(
            self._fetch_block
        ) 
Example 47
Project: sdklib   Author: ivanprjcts   File: compat.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def cache(*args, **kargs):
    """Placeholder matching the lru_cache decorator-factory shape on old Pythons.

    Decorated functions raise when called, since real caching needs 3.2+.
    """
    def wrapper(f):
        def real_wrapper(*call_args, **call_kwargs):
            raise exceptions.NotImplementedError("Only available for python 3.2. or +.")

        return real_wrapper

    return wrapper
Example 48
Project: asynq   Author: quora   File: tools.py    Apache License 2.0 5 votes vote down vote up
def alru_cache(maxsize=128, key_fn=None):
    """Async equivalent of qcore.caching.lru_cache().

    maxsize is the number of different keys cache can accomodate.
    key_fn is the function that builds key from args. The default key function
    creates a tuple out of args and kwargs. If you use the default it acts the same
    as functools.lru_cache (except with async).

    Possible use cases of key_fn:
    - Your cache key is very large, so you don't want to keep the whole key in memory.
    - The function takes some arguments that don't affect the result.

    """

    def decorator(fn):
        cache = LRUCache(maxsize)
        argspec = inspect2.getfullargspec(get_original_fn(fn))
        arg_names = argspec.args[1:] + argspec.kwonlyargs  # remove self
        async_fun = fn.asynq
        kwargs_defaults = get_kwargs_defaults(argspec)

        cache_key = key_fn
        if cache_key is None:
            # Default key: normalized (args, kwargs) tuple, defaults applied.
            def cache_key(args, kwargs):
                return get_args_tuple(args, kwargs, arg_names, kwargs_defaults)

        @asynq()
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key = cache_key(args, kwargs)
            # asynq generator protocol: result(x) sets the task's value,
            # then the bare `return` ends the generator.
            try:
                result(cache[key]); return
            except KeyError:
                # Cache miss: compute asynchronously, then store and publish.
                value = yield async_fun(*args, **kwargs)
                cache[key] = value
                result(value); return

        return wrapper
    return decorator
Example 49
Project: mars   Author: mars-project   File: distributor.py    Apache License 2.0 5 votes vote down vote up
def gen_distributor(scheduler_n_process, worker_n_process):
    """Build a Distributor that routes scheduler uids to the first
    `scheduler_n_process` slots and worker uids (prefix 'w:') to the rest."""
    class LocalClusterDistributor(Distributor):
        def __init__(self, n_process):
            super().__init__(n_process)
            self._scheduler_distributor = MarsDistributor(scheduler_n_process, 's:h1:')
            self._worker_distributor = MarsDistributor(worker_n_process, 'w:0:')

        @staticmethod
        def _is_worker_uid(uid):
            # Worker uids are strings prefixed 'w:'; anything else is scheduler-side.
            return isinstance(uid, str) and uid.startswith('w:')

        # NOTE(review): lru_cache on an instance method keys on `self` and keeps
        # the instance alive for the cache's lifetime; acceptable here since a
        # single distributor instance is created per call.
        @functools.lru_cache(100)
        def distribute(self, uid):
            if self._is_worker_uid(uid):
                # Worker slots sit after the scheduler slots, hence the offset.
                return self._worker_distributor.distribute(uid) + scheduler_n_process

            return self._scheduler_distributor.distribute(uid)

        def make_same_process(self, uid, uid_rel, delta=0):
            if self._is_worker_uid(uid_rel):
                return self._worker_distributor.make_same_process(uid, uid_rel, delta=delta)
            return self._scheduler_distributor.make_same_process(uid, uid_rel, delta=delta)

    return LocalClusterDistributor(scheduler_n_process + worker_n_process)
Example 50
Project: PyChunkedGraph   Author: seung-lab   File: meshgen.py    Mozilla Public License 2.0 5 votes vote down vote up
def get_root_l2_remapping(cg, chunk_id, stop_layer, time_stamp, n_threads=4):
    """ Retrieves root to l2 node id mapping

    :param cg: chunkedgraph object
    :param chunk_id: np.uint64
    :param stop_layer: int (NOTE(review): currently unused in this body — confirm)
    :param time_stamp: datetime object
    :param n_threads: int, worker threads for the root lookups
    :return: multiples
    """

    def _get_root_ids(args):
        # Fill the shared root_ids slice for one contiguous chunk of l2 ids.
        start_id, end_id = args

        root_ids[start_id:end_id] = cg.get_roots(l2_ids[start_id:end_id])

    l2_id_remap = get_l2_remapping(cg, chunk_id, time_stamp=time_stamp)

    l2_ids = np.array(list(l2_id_remap.keys()))

    root_ids = np.zeros(len(l2_ids), dtype=np.uint64)
    n_jobs = np.min([n_threads, len(l2_ids)])
    multi_args = []
    # Evenly partition [0, len(l2_ids)) into n_jobs slices.
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # documented replacement and behaves identically here.
    start_ids = np.linspace(0, len(l2_ids), n_jobs + 1).astype(int)
    for i_block in range(n_jobs):
        multi_args.append([start_ids[i_block], start_ids[i_block + 1]])

    if n_jobs > 0:
        mu.multithread_func(_get_root_ids, multi_args, n_threads=n_threads)

    return l2_ids, root_ids, l2_id_remap


# @lru_cache(maxsize=None)