chore: add virtual environment to repository
- Add the backend_service/venv virtual environment
- Includes all Python dependency packages
- Note: the virtual environment is about 393 MB and contains 12,655 files
@@ -0,0 +1,718 @@
"""Extensible memoizing collections and decorators."""

__all__ = (
    "Cache",
    "FIFOCache",
    "LFUCache",
    "LRUCache",
    "RRCache",
    "TLRUCache",
    "TTLCache",
    "cached",
    "cachedmethod",
)

__version__ = "6.2.2"

import collections
import collections.abc
import functools
import heapq
import random
import time

from . import keys


class _DefaultSize:
    __slots__ = ()

    def __getitem__(self, _key):
        return 1

    def __setitem__(self, _key, _value):
        pass

    def pop(self, _key):
        return 1


class Cache(collections.abc.MutableMapping):
    """Mutable mapping to serve as a simple cache or cache base class."""

    __marker = object()

    __size = _DefaultSize()

    def __init__(self, maxsize, getsizeof=None):
        if getsizeof:
            self.getsizeof = getsizeof
        if self.getsizeof is not Cache.getsizeof:
            self.__size = dict()
        self.__data = dict()
        self.__currsize = 0
        self.__maxsize = maxsize

    def __repr__(self):
        return "%s(%s, maxsize=%r, currsize=%r)" % (
            type(self).__name__,
            repr(self.__data),
            self.__maxsize,
            self.__currsize,
        )

    def __getitem__(self, key):
        try:
            return self.__data[key]
        except KeyError:
            return self.__missing__(key)

    def __setitem__(self, key, value):
        maxsize = self.__maxsize
        size = self.getsizeof(value)
        if size > maxsize:
            raise ValueError("value too large")
        if key not in self.__data or self.__size[key] < size:
            while self.__currsize + size > maxsize:
                self.popitem()
        if key in self.__data:
            diffsize = size - self.__size[key]
        else:
            diffsize = size
        self.__data[key] = value
        self.__size[key] = size
        self.__currsize += diffsize

    def __delitem__(self, key):
        size = self.__size.pop(key)
        del self.__data[key]
        self.__currsize -= size

    def __contains__(self, key):
        return key in self.__data

    def __missing__(self, key):
        raise KeyError(key)

    def __iter__(self):
        return iter(self.__data)

    def __len__(self):
        return len(self.__data)

    # Note that we cannot simply inherit get(), pop() and setdefault()
    # from MutableMapping, since these rely on __getitem__ throwing a
    # KeyError on cache miss. This is not the case if __missing__ is
    # implemented for a Cache subclass, so we have to roll our own,
    # somewhat less elegant versions.

    def get(self, key, default=None):
        if key in self:
            return self[key]
        else:
            return default

    def pop(self, key, default=__marker):
        if key in self:
            value = self[key]
            del self[key]
        elif default is self.__marker:
            raise KeyError(key)
        else:
            value = default
        return value

    def setdefault(self, key, default=None):
        if key in self:
            value = self[key]
        else:
            self[key] = value = default
        return value

    @property
    def maxsize(self):
        """The maximum size of the cache."""
        return self.__maxsize

    @property
    def currsize(self):
        """The current size of the cache."""
        return self.__currsize

    @staticmethod
    def getsizeof(value):
        """Return the size of a cache element's value."""
        return 1


class FIFOCache(Cache):
    """First In First Out (FIFO) cache implementation."""

    def __init__(self, maxsize, getsizeof=None):
        Cache.__init__(self, maxsize, getsizeof)
        self.__order = collections.OrderedDict()

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        cache_setitem(self, key, value)
        try:
            self.__order.move_to_end(key)
        except KeyError:
            self.__order[key] = None

    def __delitem__(self, key, cache_delitem=Cache.__delitem__):
        cache_delitem(self, key)
        del self.__order[key]

    def popitem(self):
        """Remove and return the `(key, value)` pair first inserted."""
        try:
            key = next(iter(self.__order))
        except StopIteration:
            raise KeyError("%s is empty" % type(self).__name__) from None
        else:
            return (key, self.pop(key))


class LFUCache(Cache):
    """Least Frequently Used (LFU) cache implementation."""

    class _Link:
        __slots__ = ("count", "keys", "next", "prev")

        def __init__(self, count):
            self.count = count
            self.keys = set()

        def unlink(self):
            next = self.next
            prev = self.prev
            prev.next = next
            next.prev = prev

    def __init__(self, maxsize, getsizeof=None):
        Cache.__init__(self, maxsize, getsizeof)
        self.__root = root = LFUCache._Link(0)  # sentinel
        root.prev = root.next = root
        self.__links = {}

    def __getitem__(self, key, cache_getitem=Cache.__getitem__):
        value = cache_getitem(self, key)
        if key in self:  # __missing__ may not store item
            self.__touch(key)
        return value

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        cache_setitem(self, key, value)
        if key in self.__links:
            return self.__touch(key)
        root = self.__root
        link = root.next
        if link.count != 1:
            link = LFUCache._Link(1)
            link.next = root.next
            root.next = link.next.prev = link
            link.prev = root
        link.keys.add(key)
        self.__links[key] = link

    def __delitem__(self, key, cache_delitem=Cache.__delitem__):
        cache_delitem(self, key)
        link = self.__links.pop(key)
        link.keys.remove(key)
        if not link.keys:
            link.unlink()

    def popitem(self):
        """Remove and return the `(key, value)` pair least frequently used."""
        root = self.__root
        curr = root.next
        if curr is root:
            raise KeyError("%s is empty" % type(self).__name__) from None
        key = next(iter(curr.keys))  # remove an arbitrary element
        return (key, self.pop(key))

    def __touch(self, key):
        """Increment use count"""
        link = self.__links[key]
        curr = link.next
        if curr.count != link.count + 1:
            if len(link.keys) == 1:
                link.count += 1
                return
            curr = LFUCache._Link(link.count + 1)
            curr.next = link.next
            link.next = curr.next.prev = curr
            curr.prev = link
        curr.keys.add(key)
        link.keys.remove(key)
        if not link.keys:
            link.unlink()
        self.__links[key] = curr


class LRUCache(Cache):
    """Least Recently Used (LRU) cache implementation."""

    def __init__(self, maxsize, getsizeof=None):
        Cache.__init__(self, maxsize, getsizeof)
        self.__order = collections.OrderedDict()

    def __getitem__(self, key, cache_getitem=Cache.__getitem__):
        value = cache_getitem(self, key)
        if key in self:  # __missing__ may not store item
            self.__touch(key)
        return value

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        cache_setitem(self, key, value)
        self.__touch(key)

    def __delitem__(self, key, cache_delitem=Cache.__delitem__):
        cache_delitem(self, key)
        del self.__order[key]

    def popitem(self):
        """Remove and return the `(key, value)` pair least recently used."""
        try:
            key = next(iter(self.__order))
        except StopIteration:
            raise KeyError("%s is empty" % type(self).__name__) from None
        else:
            return (key, self.pop(key))

    def __touch(self, key):
        """Mark as recently used"""
        try:
            self.__order.move_to_end(key)
        except KeyError:
            self.__order[key] = None


class RRCache(Cache):
    """Random Replacement (RR) cache implementation."""

    def __init__(self, maxsize, choice=random.choice, getsizeof=None):
        Cache.__init__(self, maxsize, getsizeof)
        self.__choice = choice
        self.__index = {}
        self.__keys = []

    @property
    def choice(self):
        """The `choice` function used by the cache."""
        return self.__choice

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        cache_setitem(self, key, value)
        if key not in self.__index:
            self.__index[key] = len(self.__keys)
            self.__keys.append(key)

    def __delitem__(self, key, cache_delitem=Cache.__delitem__):
        cache_delitem(self, key)
        index = self.__index.pop(key)
        if index != len(self.__keys) - 1:
            last = self.__keys[-1]
            self.__keys[index] = last
            self.__index[last] = index
        self.__keys.pop()

    def popitem(self):
        """Remove and return a random `(key, value)` pair."""
        try:
            key = self.__choice(self.__keys)
        except IndexError:
            raise KeyError("%s is empty" % type(self).__name__) from None
        else:
            return (key, self.pop(key))


class _TimedCache(Cache):
    """Base class for time aware cache implementations."""

    class _Timer:
        def __init__(self, timer):
            self.__timer = timer
            self.__nesting = 0

        def __call__(self):
            if self.__nesting == 0:
                return self.__timer()
            else:
                return self.__time

        def __enter__(self):
            if self.__nesting == 0:
                self.__time = time = self.__timer()
            else:
                time = self.__time
            self.__nesting += 1
            return time

        def __exit__(self, *exc):
            self.__nesting -= 1

        def __reduce__(self):
            return _TimedCache._Timer, (self.__timer,)

        def __getattr__(self, name):
            return getattr(self.__timer, name)

    def __init__(self, maxsize, timer=time.monotonic, getsizeof=None):
        Cache.__init__(self, maxsize, getsizeof)
        self.__timer = _TimedCache._Timer(timer)

    def __repr__(self, cache_repr=Cache.__repr__):
        with self.__timer as time:
            self.expire(time)
            return cache_repr(self)

    def __len__(self, cache_len=Cache.__len__):
        with self.__timer as time:
            self.expire(time)
            return cache_len(self)

    @property
    def currsize(self):
        with self.__timer as time:
            self.expire(time)
            return super().currsize

    @property
    def timer(self):
        """The timer function used by the cache."""
        return self.__timer

    def clear(self):
        with self.__timer as time:
            self.expire(time)
            Cache.clear(self)

    def get(self, *args, **kwargs):
        with self.__timer:
            return Cache.get(self, *args, **kwargs)

    def pop(self, *args, **kwargs):
        with self.__timer:
            return Cache.pop(self, *args, **kwargs)

    def setdefault(self, *args, **kwargs):
        with self.__timer:
            return Cache.setdefault(self, *args, **kwargs)


class TTLCache(_TimedCache):
    """LRU Cache implementation with per-item time-to-live (TTL) value."""

    class _Link:
        __slots__ = ("key", "expires", "next", "prev")

        def __init__(self, key=None, expires=None):
            self.key = key
            self.expires = expires

        def __reduce__(self):
            return TTLCache._Link, (self.key, self.expires)

        def unlink(self):
            next = self.next
            prev = self.prev
            prev.next = next
            next.prev = prev

    def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None):
        _TimedCache.__init__(self, maxsize, timer, getsizeof)
        self.__root = root = TTLCache._Link()
        root.prev = root.next = root
        self.__links = collections.OrderedDict()
        self.__ttl = ttl

    def __contains__(self, key):
        try:
            link = self.__links[key]  # no reordering
        except KeyError:
            return False
        else:
            return self.timer() < link.expires

    def __getitem__(self, key, cache_getitem=Cache.__getitem__):
        try:
            link = self.__getlink(key)
        except KeyError:
            expired = False
        else:
            expired = not (self.timer() < link.expires)
        if expired:
            return self.__missing__(key)
        else:
            return cache_getitem(self, key)

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        with self.timer as time:
            self.expire(time)
            cache_setitem(self, key, value)
        try:
            link = self.__getlink(key)
        except KeyError:
            self.__links[key] = link = TTLCache._Link(key)
        else:
            link.unlink()
        link.expires = time + self.__ttl
        link.next = root = self.__root
        link.prev = prev = root.prev
        prev.next = root.prev = link

    def __delitem__(self, key, cache_delitem=Cache.__delitem__):
        cache_delitem(self, key)
        link = self.__links.pop(key)
        link.unlink()
        if not (self.timer() < link.expires):
            raise KeyError(key)

    def __iter__(self):
        root = self.__root
        curr = root.next
        while curr is not root:
            # "freeze" time for iterator access
            with self.timer as time:
                if time < curr.expires:
                    yield curr.key
            curr = curr.next

    def __setstate__(self, state):
        self.__dict__.update(state)
        root = self.__root
        root.prev = root.next = root
        for link in sorted(self.__links.values(), key=lambda obj: obj.expires):
            link.next = root
            link.prev = prev = root.prev
            prev.next = root.prev = link
        self.expire(self.timer())

    @property
    def ttl(self):
        """The time-to-live value of the cache's items."""
        return self.__ttl

    def expire(self, time=None):
        """Remove expired items from the cache and return an iterable of the
        expired `(key, value)` pairs.

        """
        if time is None:
            time = self.timer()
        root = self.__root
        curr = root.next
        links = self.__links
        expired = []
        cache_delitem = Cache.__delitem__
        cache_getitem = Cache.__getitem__
        while curr is not root and not (time < curr.expires):
            expired.append((curr.key, cache_getitem(self, curr.key)))
            cache_delitem(self, curr.key)
            del links[curr.key]
            next = curr.next
            curr.unlink()
            curr = next
        return expired

    def popitem(self):
        """Remove and return the `(key, value)` pair least recently used that
        has not already expired.

        """
        with self.timer as time:
            self.expire(time)
            try:
                key = next(iter(self.__links))
            except StopIteration:
                raise KeyError("%s is empty" % type(self).__name__) from None
            else:
                return (key, self.pop(key))

    def __getlink(self, key):
        value = self.__links[key]
        self.__links.move_to_end(key)
        return value


class TLRUCache(_TimedCache):
    """Time aware Least Recently Used (TLRU) cache implementation."""

    @functools.total_ordering
    class _Item:
        __slots__ = ("key", "expires", "removed")

        def __init__(self, key=None, expires=None):
            self.key = key
            self.expires = expires
            self.removed = False

        def __lt__(self, other):
            return self.expires < other.expires

    def __init__(self, maxsize, ttu, timer=time.monotonic, getsizeof=None):
        _TimedCache.__init__(self, maxsize, timer, getsizeof)
        self.__items = collections.OrderedDict()
        self.__order = []
        self.__ttu = ttu

    def __contains__(self, key):
        try:
            item = self.__items[key]  # no reordering
        except KeyError:
            return False
        else:
            return self.timer() < item.expires

    def __getitem__(self, key, cache_getitem=Cache.__getitem__):
        try:
            item = self.__getitem(key)
        except KeyError:
            expired = False
        else:
            expired = not (self.timer() < item.expires)
        if expired:
            return self.__missing__(key)
        else:
            return cache_getitem(self, key)

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        with self.timer as time:
            expires = self.__ttu(key, value, time)
            if not (time < expires):
                return  # skip expired items
            self.expire(time)
            cache_setitem(self, key, value)
        # removing an existing item would break the heap structure, so
        # only mark it as removed for now
        try:
            self.__getitem(key).removed = True
        except KeyError:
            pass
        self.__items[key] = item = TLRUCache._Item(key, expires)
        heapq.heappush(self.__order, item)

    def __delitem__(self, key, cache_delitem=Cache.__delitem__):
        with self.timer as time:
            # no self.expire() for performance reasons, e.g. self.clear() [#67]
            cache_delitem(self, key)
        item = self.__items.pop(key)
        item.removed = True
        if not (time < item.expires):
            raise KeyError(key)

    def __iter__(self):
        for curr in self.__order:
            # "freeze" time for iterator access
            with self.timer as time:
                if time < curr.expires and not curr.removed:
                    yield curr.key

    @property
    def ttu(self):
        """The local time-to-use function used by the cache."""
        return self.__ttu

    def expire(self, time=None):
        """Remove expired items from the cache and return an iterable of the
        expired `(key, value)` pairs.

        """
        if time is None:
            time = self.timer()
        items = self.__items
        order = self.__order
        # clean up the heap if too many items are marked as removed
        if len(order) > len(items) * 2:
            self.__order = order = [item for item in order if not item.removed]
            heapq.heapify(order)
        expired = []
        cache_delitem = Cache.__delitem__
        cache_getitem = Cache.__getitem__
        while order and (order[0].removed or not (time < order[0].expires)):
            item = heapq.heappop(order)
            if not item.removed:
                expired.append((item.key, cache_getitem(self, item.key)))
                cache_delitem(self, item.key)
                del items[item.key]
        return expired

    def popitem(self):
        """Remove and return the `(key, value)` pair least recently used that
        has not already expired.

        """
        with self.timer as time:
            self.expire(time)
            try:
                key = next(iter(self.__items))
            except StopIteration:
                raise KeyError("%s is empty" % type(self).__name__) from None
            else:
                return (key, self.pop(key))

    def __getitem(self, key):
        value = self.__items[key]
        self.__items.move_to_end(key)
        return value


_CacheInfo = collections.namedtuple(
    "CacheInfo", ["hits", "misses", "maxsize", "currsize"]
)


def cached(cache, key=keys.hashkey, lock=None, condition=None, info=False):
    """Decorator to wrap a function with a memoizing callable that saves
    results in a cache.

    """
    from ._cached import _wrapper

    if isinstance(condition, bool):
        from warnings import warn

        warn(
            "passing `info` as positional parameter is deprecated",
            DeprecationWarning,
            stacklevel=2,
        )
        info = condition
        condition = None

    def decorator(func):
        if info:
            if isinstance(cache, Cache):

                def make_info(hits, misses):
                    return _CacheInfo(hits, misses, cache.maxsize, cache.currsize)

            elif isinstance(cache, collections.abc.Mapping):

                def make_info(hits, misses):
                    return _CacheInfo(hits, misses, None, len(cache))

            else:

                def make_info(hits, misses):
                    return _CacheInfo(hits, misses, 0, 0)

            return _wrapper(func, cache, key, lock, condition, info=make_info)
        else:
            return _wrapper(func, cache, key, lock, condition)

    return decorator


def cachedmethod(cache, key=keys.methodkey, lock=None, condition=None):
    """Decorator to wrap a class or instance method with a memoizing
    callable that saves results in a cache.

    """
    from ._cachedmethod import _wrapper

    def decorator(method):
        return _wrapper(method, cache, key, lock, condition)

    return decorator
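For orientation, a minimal usage sketch of the classes and decorators defined above. This is illustrative only and not part of the committed file; the function and values are hypothetical:

import threading

from cachetools import LRUCache, cached


@cached(cache=LRUCache(maxsize=32), lock=threading.Lock())
def fib(n):
    # recursive calls are served from the shared, thread-safe cache;
    # the lock is not held while fib() itself runs, so recursion is safe
    return n if n < 2 else fib(n - 1) + fib(n - 2)


assert fib(30) == 832040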
@@ -0,0 +1,247 @@
"""Function decorator helpers."""

import functools


def _condition_info(func, cache, key, lock, cond, info):
    hits = misses = 0
    pending = set()

    def wrapper(*args, **kwargs):
        nonlocal hits, misses
        k = key(*args, **kwargs)
        with lock:
            cond.wait_for(lambda: k not in pending)
            try:
                result = cache[k]
                hits += 1
                return result
            except KeyError:
                pending.add(k)
                misses += 1
        try:
            v = func(*args, **kwargs)
            with lock:
                try:
                    cache[k] = v
                except ValueError:
                    pass  # value too large
            return v
        finally:
            with lock:
                pending.remove(k)
                cond.notify_all()

    def cache_clear():
        nonlocal hits, misses
        with lock:
            cache.clear()
            hits = misses = 0

    def cache_info():
        with lock:
            return info(hits, misses)

    wrapper.cache_clear = cache_clear
    wrapper.cache_info = cache_info
    return wrapper


def _locked_info(func, cache, key, lock, info):
    hits = misses = 0

    def wrapper(*args, **kwargs):
        nonlocal hits, misses
        k = key(*args, **kwargs)
        with lock:
            try:
                result = cache[k]
                hits += 1
                return result
            except KeyError:
                misses += 1
        v = func(*args, **kwargs)
        with lock:
            try:
                # In case of a race condition, i.e. if another thread
                # stored a value for this key while we were calling
                # func(), prefer the cached value.
                return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large

    def cache_clear():
        nonlocal hits, misses
        with lock:
            cache.clear()
            hits = misses = 0

    def cache_info():
        with lock:
            return info(hits, misses)

    wrapper.cache_clear = cache_clear
    wrapper.cache_info = cache_info
    return wrapper


def _unlocked_info(func, cache, key, info):
    hits = misses = 0

    def wrapper(*args, **kwargs):
        nonlocal hits, misses
        k = key(*args, **kwargs)
        try:
            result = cache[k]
            hits += 1
            return result
        except KeyError:
            misses += 1
        v = func(*args, **kwargs)
        try:
            cache[k] = v
        except ValueError:
            pass  # value too large
        return v

    def cache_clear():
        nonlocal hits, misses
        cache.clear()
        hits = misses = 0

    wrapper.cache_clear = cache_clear
    wrapper.cache_info = lambda: info(hits, misses)
    return wrapper


def _uncached_info(func, info):
    misses = 0

    def wrapper(*args, **kwargs):
        nonlocal misses
        misses += 1
        return func(*args, **kwargs)

    def cache_clear():
        nonlocal misses
        misses = 0

    wrapper.cache_clear = cache_clear
    wrapper.cache_info = lambda: info(0, misses)
    return wrapper


def _condition(func, cache, key, lock, cond):
    pending = set()

    def wrapper(*args, **kwargs):
        k = key(*args, **kwargs)
        with lock:
            cond.wait_for(lambda: k not in pending)
            try:
                result = cache[k]
                return result
            except KeyError:
                pending.add(k)
        try:
            v = func(*args, **kwargs)
            with lock:
                try:
                    cache[k] = v
                except ValueError:
                    pass  # value too large
            return v
        finally:
            with lock:
                pending.remove(k)
                cond.notify_all()

    def cache_clear():
        with lock:
            cache.clear()

    wrapper.cache_clear = cache_clear
    return wrapper


def _locked(func, cache, key, lock):
    def wrapper(*args, **kwargs):
        k = key(*args, **kwargs)
        with lock:
            try:
                return cache[k]
            except KeyError:
                pass  # key not found
        v = func(*args, **kwargs)
        with lock:
            try:
                # possible race condition: see above
                return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large

    def cache_clear():
        with lock:
            cache.clear()

    wrapper.cache_clear = cache_clear
    return wrapper


def _unlocked(func, cache, key):
    def wrapper(*args, **kwargs):
        k = key(*args, **kwargs)
        try:
            return cache[k]
        except KeyError:
            pass  # key not found
        v = func(*args, **kwargs)
        try:
            cache[k] = v
        except ValueError:
            pass  # value too large
        return v

    wrapper.cache_clear = lambda: cache.clear()
    return wrapper


def _uncached(func):
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    wrapper.cache_clear = lambda: None
    return wrapper


def _wrapper(func, cache, key, lock=None, cond=None, info=None):
    if info is not None:
        if cache is None:
            wrapper = _uncached_info(func, info)
        elif cond is not None and lock is not None:
            wrapper = _condition_info(func, cache, key, lock, cond, info)
        elif cond is not None:
            wrapper = _condition_info(func, cache, key, cond, cond, info)
        elif lock is not None:
            wrapper = _locked_info(func, cache, key, lock, info)
        else:
            wrapper = _unlocked_info(func, cache, key, info)
    else:
        if cache is None:
            wrapper = _uncached(func)
        elif cond is not None and lock is not None:
            wrapper = _condition(func, cache, key, lock, cond)
        elif cond is not None:
            wrapper = _condition(func, cache, key, cond, cond)
        elif lock is not None:
            wrapper = _locked(func, cache, key, lock)
        else:
            wrapper = _unlocked(func, cache, key)
        wrapper.cache_info = None

    wrapper.cache = cache
    wrapper.cache_key = key
    wrapper.cache_lock = lock if lock is not None else cond
    wrapper.cache_condition = cond

    return functools.update_wrapper(wrapper, func)
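A hedged sketch of what the `_condition*` variants above provide: concurrent callers with an equal key wait for the first call to finish instead of each recomputing the value ("cache stampede" protection). The function and names below are illustrative, not part of the committed file:

import threading

from cachetools import TTLCache, cached


@cached(TTLCache(maxsize=1024, ttl=60), condition=threading.Condition())
def load_profile(user_id):
    # stands in for an expensive lookup; with the condition variant,
    # concurrent calls for the same user_id execute this body only once
    return {"id": user_id}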
@@ -0,0 +1,128 @@
"""Method decorator helpers."""

import functools
import weakref


def warn_cache_none():
    from warnings import warn

    warn(
        "returning `None` from `cache(self)` is deprecated",
        DeprecationWarning,
        stacklevel=3,
    )


def _condition(method, cache, key, lock, cond):
    pending = weakref.WeakKeyDictionary()

    def wrapper(self, *args, **kwargs):
        c = cache(self)
        if c is None:
            warn_cache_none()
            return method(self, *args, **kwargs)
        k = key(self, *args, **kwargs)
        with lock(self):
            p = pending.setdefault(self, set())
            cond(self).wait_for(lambda: k not in p)
            try:
                return c[k]
            except KeyError:
                p.add(k)
        try:
            v = method(self, *args, **kwargs)
            with lock(self):
                try:
                    c[k] = v
                except ValueError:
                    pass  # value too large
            return v
        finally:
            with lock(self):
                pending[self].remove(k)
                cond(self).notify_all()

    def cache_clear(self):
        c = cache(self)
        if c is not None:
            with lock(self):
                c.clear()

    wrapper.cache_clear = cache_clear
    return wrapper


def _locked(method, cache, key, lock):
    def wrapper(self, *args, **kwargs):
        c = cache(self)
        if c is None:
            warn_cache_none()
            return method(self, *args, **kwargs)
        k = key(self, *args, **kwargs)
        with lock(self):
            try:
                return c[k]
            except KeyError:
                pass  # key not found
        v = method(self, *args, **kwargs)
        # in case of a race, prefer the item already in the cache
        with lock(self):
            try:
                return c.setdefault(k, v)
            except ValueError:
                return v  # value too large

    def cache_clear(self):
        c = cache(self)
        if c is not None:
            with lock(self):
                c.clear()

    wrapper.cache_clear = cache_clear
    return wrapper


def _unlocked(method, cache, key):
    def wrapper(self, *args, **kwargs):
        c = cache(self)
        if c is None:
            warn_cache_none()
            return method(self, *args, **kwargs)
        k = key(self, *args, **kwargs)
        try:
            return c[k]
        except KeyError:
            pass  # key not found
        v = method(self, *args, **kwargs)
        try:
            c[k] = v
        except ValueError:
            pass  # value too large
        return v

    def cache_clear(self):
        c = cache(self)
        if c is not None:
            c.clear()

    wrapper.cache_clear = cache_clear
    return wrapper


def _wrapper(method, cache, key, lock=None, cond=None):
    if cond is not None and lock is not None:
        wrapper = _condition(method, cache, key, lock, cond)
    elif cond is not None:
        wrapper = _condition(method, cache, key, cond, cond)
    elif lock is not None:
        wrapper = _locked(method, cache, key, lock)
    else:
        wrapper = _unlocked(method, cache, key)

    wrapper.cache = cache
    wrapper.cache_key = key
    wrapper.cache_lock = lock if lock is not None else cond
    wrapper.cache_condition = cond

    return functools.update_wrapper(wrapper, method)
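As the helpers above expect, `cachedmethod` takes the cache and lock as callables applied to `self`, so each instance can own its cache. A minimal illustrative sketch (class and method names are hypothetical, not part of the committed file):

import operator
import threading

from cachetools import LRUCache, cachedmethod


class DataStore:
    def __init__(self):
        self._cache = LRUCache(maxsize=16)
        self._lock = threading.Lock()

    # cache(self) and lock(self) resolve to the per-instance attributes
    @cachedmethod(operator.attrgetter("_cache"), lock=operator.attrgetter("_lock"))
    def lookup(self, key):
        return key.upper()  # stands in for an expensive per-instance computation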
@@ -0,0 +1,102 @@
"""`functools.lru_cache` compatible memoizing function decorators."""

__all__ = ("fifo_cache", "lfu_cache", "lru_cache", "rr_cache", "ttl_cache")

import functools
import math
import random
import time
from threading import Condition

from . import FIFOCache, LFUCache, LRUCache, RRCache, TTLCache
from . import cached
from . import keys


class _UnboundTTLCache(TTLCache):
    def __init__(self, ttl, timer):
        TTLCache.__init__(self, math.inf, ttl, timer)

    @property
    def maxsize(self):
        return None


def _cache(cache, maxsize, typed):
    def decorator(func):
        key = keys.typedkey if typed else keys.hashkey
        wrapper = cached(cache=cache, key=key, condition=Condition(), info=True)(func)
        wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed}
        return wrapper

    return decorator


def fifo_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a First In First Out (FIFO)
    algorithm.

    """
    if maxsize is None:
        return _cache({}, None, typed)
    elif callable(maxsize):
        return _cache(FIFOCache(128), 128, typed)(maxsize)
    else:
        return _cache(FIFOCache(maxsize), maxsize, typed)


def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.

    """
    if maxsize is None:
        return _cache({}, None, typed)
    elif callable(maxsize):
        return _cache(LFUCache(128), 128, typed)(maxsize)
    else:
        return _cache(LFUCache(maxsize), maxsize, typed)


def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.

    """
    if maxsize is None:
        return _cache({}, None, typed)
    elif callable(maxsize):
        return _cache(LRUCache(128), 128, typed)(maxsize)
    else:
        return _cache(LRUCache(maxsize), maxsize, typed)


def rr_cache(maxsize=128, choice=random.choice, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.

    """
    if maxsize is None:
        return _cache({}, None, typed)
    elif callable(maxsize):
        return _cache(RRCache(128, choice), 128, typed)(maxsize)
    else:
        return _cache(RRCache(maxsize, choice), maxsize, typed)


def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.

    """
    if maxsize is None:
        return _cache(_UnboundTTLCache(ttl, timer), None, typed)
    elif callable(maxsize):
        return _cache(TTLCache(128, ttl, timer), 128, typed)(maxsize)
    else:
        return _cache(TTLCache(maxsize, ttl, timer), maxsize, typed)
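These decorators mirror `functools.lru_cache`, including `cache_info()` and `cache_clear()`. An illustrative sketch with a hypothetical function (not part of the committed file):

from cachetools.func import ttl_cache


@ttl_cache(maxsize=128, ttl=600)
def get_rate(currency):
    return 1.0  # stands in for a slow remote call


get_rate("EUR")
get_rate("EUR")  # second call within the TTL is served from the cache
print(get_rate.cache_info())  # e.g. CacheInfo(hits=1, misses=1, maxsize=128, currsize=1)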
@@ -0,0 +1,62 @@
"""Key functions for memoizing decorators."""

__all__ = ("hashkey", "methodkey", "typedkey", "typedmethodkey")


class _HashedTuple(tuple):
    """A tuple that ensures that hash() will be called no more than once
    per element, since cache decorators will hash the key multiple
    times on a cache miss. See also _HashedSeq in the standard
    library functools implementation.

    """

    __hashvalue = None

    def __hash__(self, hash=tuple.__hash__):
        hashvalue = self.__hashvalue
        if hashvalue is None:
            self.__hashvalue = hashvalue = hash(self)
        return hashvalue

    def __add__(self, other, add=tuple.__add__):
        return _HashedTuple(add(self, other))

    def __radd__(self, other, add=tuple.__add__):
        return _HashedTuple(add(other, self))

    def __getstate__(self):
        return {}


# A sentinel for separating args from kwargs. Using the class itself
# ensures uniqueness and preserves identity when pickling/unpickling.
_kwmark = (_HashedTuple,)


def hashkey(*args, **kwargs):
    """Return a cache key for the specified hashable arguments."""

    if kwargs:
        return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
    else:
        return _HashedTuple(args)


def methodkey(self, *args, **kwargs):
    """Return a cache key for use with cached methods."""
    return hashkey(*args, **kwargs)


def typedkey(*args, **kwargs):
    """Return a typed cache key for the specified hashable arguments."""

    key = hashkey(*args, **kwargs)
    key += tuple(type(v) for v in args)
    key += tuple(type(v) for _, v in sorted(kwargs.items()))
    return key


def typedmethodkey(self, *args, **kwargs):
    """Return a typed cache key for use with cached methods."""
    return typedkey(*args, **kwargs)
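A short illustrative check of the difference between the two key families above (not part of the committed file):

from cachetools.keys import hashkey, typedkey

# hashkey compares by equality, so 1 and 1.0 share a cache entry;
# typedkey also folds in the argument types, so they do not
assert hashkey(1, x=2) == hashkey(1.0, x=2)
assert typedkey(1) != typedkey(1.0)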