Update documentation; add getsizeof and lock support to the cache decorators.
diff --git a/Changes b/Changes
index 310f0e1..1fea5e6 100644
--- a/Changes
+++ b/Changes
@@ -1,3 +1,9 @@
+0.3.0 2014-05-06
+
+* Remove @cache decorator.
+* Add size, getsizeof members.
+* Add @cachedmethod decorator.
+
0.2.0 2014-04-02
* Add @cache decorator.
diff --git a/README.rst b/README.rst
index 073a62b..4b01431 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,9 @@
cachetools
========================================================================
-This module provides various memoizing collections and function
-decorators, including a variant of the Python 3 Standard Library
-`functools.lru_cache`_ decorator.
+This module provides various memoizing collections and decorators,
+including a variant of the Python 3 Standard Library
+`functools.lru_cache`_ function decorator.
.. code-block:: pycon
@@ -12,26 +12,36 @@
>>> cache['first'] = 1
>>> cache['second'] = 2
>>> cache
- LRUCache(OrderedDict([('first', 1), ('second', 2)]), maxsize=2)
+ LRUCache(OrderedDict([('first', 1), ('second', 2)]), size=2, maxsize=2)
>>> cache['third'] = 3
>>> cache
- LRUCache(OrderedDict([('second', 2), ('third', 3)]), maxsize=2)
+ LRUCache(OrderedDict([('second', 2), ('third', 3)]), size=2, maxsize=2)
>>> cache['second']
2
>>> cache
- LRUCache(OrderedDict([('third', 3), ('second', 2)]), maxsize=2)
+ LRUCache(OrderedDict([('third', 3), ('second', 2)]), size=2, maxsize=2)
>>> cache['fourth'] = 4
>>> cache
- LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), maxsize=2)
+ LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), size=2, maxsize=2)
-For the purpose of this module, a *cache* is a mutable mapping_ of
-fixed size, defined by its ``maxsize`` attribute. When the cache is
-full, i.e. ``len(cache) == cache.maxsize``, the cache must choose
-which item(s) to discard based on a suitable `cache algorithm`_.
+For the purpose of this module, a *cache* is a mutable_ mapping_ with
+additional attributes ``size`` and ``maxsize``, which hold the current
+and maximum size of the cache, and a (possibly static) method
+``getsizeof``.
+
+The current size of the cache is the sum of the results of
+``getsizeof`` applied to each of the cache's values, i.e. ``cache.size
+== sum(map(cache.getsizeof, cache.values()), 0)``. As a special case,
+if ``getsizeof`` returns ``1`` irrespective of its argument,
+``cache.size == len(cache)``.
+
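+For illustration, a brief sketch, assuming the cache constructors
+accept the ``getsizeof`` function as an optional argument (as the
+decorators in this release do when building their caches), with ``len``
+counting each string value by its length:
+
+.. code-block:: pycon
+
+   >>> cache = LRUCache(maxsize=16, getsizeof=len)
+   >>> cache['first'] = 'foo'
+   >>> cache['second'] = 'foobar'
+   >>> cache.size
+   9
+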
+When the cache's maximum size is exceeded, i.e. when
+``cache.size > cache.maxsize``, the cache must choose which item(s) to
+discard based on a suitable `cache algorithm`_.
This module provides various cache implementations based on different
cache algorithms, as well as decorators for easily memoizing function
-calls, and utilities for creating custom cache implementations.
+and method calls.
Installation
@@ -45,19 +55,19 @@
Project Resources
------------------------------------------------------------------------
+.. image:: http://img.shields.io/pypi/v/cachetools.svg
+ :target: https://pypi.python.org/pypi/cachetools/
+ :alt: Latest PyPI version
+
+.. image:: http://img.shields.io/pypi/dm/cachetools.svg
+ :target: https://pypi.python.org/pypi/cachetools/
+ :alt: Number of PyPI downloads
+
- `Documentation`_
- `Issue Tracker`_
- `Source Code`_
- `Change Log`_
-.. image:: https://pypip.in/v/cachetools/badge.png
- :target: https://pypi.python.org/pypi/cachetools/
- :alt: Latest PyPI version
-
-.. image:: https://pypip.in/d/cachetools/badge.png
- :target: https://pypi.python.org/pypi/cachetools/
- :alt: Number of PyPI downloads
-
License
------------------------------------------------------------------------
@@ -68,6 +78,7 @@
.. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache
+.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable
.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping
.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms
diff --git a/cachetools.py b/cachetools.py
index 06240bc..2edac36 100644
--- a/cachetools.py
+++ b/cachetools.py
@@ -202,58 +202,60 @@
return decorator
-def lru_cache(maxsize=128, typed=False, lock=RLock):
- """Decorator to wrap a function with a memoizing callable that
- saves up to the `maxsize` most recent calls based on a Least
- Recently Used (LRU) algorithm.
- """
- if typed:
- return _cachedfunc(LRUCache(maxsize), _makekey_typed, lock())
- else:
- return _cachedfunc(LRUCache(maxsize), _makekey, lock())
-
-
-def lfu_cache(maxsize=128, typed=False, lock=RLock):
- """Decorator to wrap a function with a memoizing callable that
- saves up to the `maxsize` most recent calls based on a Least
- Frequently Used (LFU) algorithm.
- """
- if typed:
- return _cachedfunc(LFUCache(maxsize), _makekey_typed, lock())
- else:
- return _cachedfunc(LFUCache(maxsize), _makekey, lock())
-
-
-def rr_cache(maxsize=128, typed=False, lock=RLock):
- """Decorator to wrap a function with a memoizing callable that
- saves up to the `maxsize` most recent calls based on a Random
- Replacement (RR) algorithm.
- """
- if typed:
- return _cachedfunc(RRCache(maxsize), _makekey_typed, lock())
- else:
- return _cachedfunc(RRCache(maxsize), _makekey, lock())
-
-
-def cachedmethod(getcache, typed=False):
- """Decorator to wrap a class or instance method with a memoizing
- callable.
-
- """
-
- makekey = _makekey_typed if typed else _makekey
-
- def decorator(method):
+def _cachedmeth(getcache, makekey, lock):
+ def decorator(func):
def wrapper(self, *args, **kwargs):
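+            # build the cache key from the wrapped method and its call
+            # arguments; note that `self` itself is not part of the key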
+ key = makekey((func,) + args, kwargs)
cache = getcache(self)
- key = makekey((self, method) + args, kwargs)
- try:
- return cache[key]
- except KeyError:
- result = method(self, *args, **kwargs)
+ with lock:
+ try:
+ return cache[key]
+ except KeyError:
+ pass
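+            # cache miss: compute the result without holding the lock,
+            # then re-acquire it below to store the value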
+ result = func(self, *args, **kwargs)
+ with lock:
cache[key] = result
- return result
+ return result
- return functools.update_wrapper(wrapper, method)
+ return functools.update_wrapper(wrapper, func)
return decorator
+
+
+def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+ return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock())
+
+
+def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Frequently Used (LFU)
+ algorithm.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+ return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock())
+
+
+def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Random Replacement (RR)
+ algorithm.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+ return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock())
+
+
+def cachedmethod(getcache, typed=False, lock=RLock):
+ """Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a (possibly shared) cache.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+ return _cachedmeth(getcache, makekey, lock())
diff --git a/docs/index.rst b/docs/index.rst
index 9134e98..0c78a17 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -94,28 +94,51 @@
.. decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock)
Decorator to wrap a function with a memoizing callable that saves
- up to the `maxsize` most recent calls based on a Least Recently
- Used (LRU) algorithm.
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm.
.. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock)
Decorator to wrap a function with a memoizing callable that saves
- up to the `maxsize` most recent calls based on a Least Frequently
- Used (LFU) algorithm.
+ up to `maxsize` results based on a Least Frequently Used (LFU)
+ algorithm.
.. decorator:: rr_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock)
Decorator to wrap a function with a memoizing callable that saves
- up to the `maxsize` most recent calls based on a Random Replacement
- (RR) algorithm.
+ up to `maxsize` results based on a Random Replacement (RR)
+ algorithm.
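+
+As a minimal sketch (the ``fib`` function here is purely illustrative,
+not part of this module), memoizing a recursive function with the LRU
+decorator::
+
+   from cachetools import lru_cache
+
+   @lru_cache(maxsize=32)
+   def fib(n):
+       # repeated and recursive calls are answered from the cache
+       return n if n < 2 else fib(n - 1) + fib(n - 2)
+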
Method Decorators
------------------------------------------------------------------------
-.. decorator:: cachedmethod(getcache, typed=False)
+.. decorator:: cachedmethod(getcache, typed=False, lock=threading.RLock)
- Decorator to wrap a class or instance method with a memoizing callable.
+ Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a (possibly shared) cache.
+
+ `getcache` specifies a function of one argument that, when passed
+ :const:`self`, will return the cache object for the instance or
+ class. See the `Function Decorators`_ section for details on the
+ other arguments.
+
+ Python 3 example of a shared (class) LRU cache for static web
+ content::
+
+
+      import operator
+      import urllib.error
+      import urllib.request
+
+      from cachetools import LRUCache, cachedmethod
+
+
+ cache = LRUCache(maxsize=32)
+
+ @cachedmethod(operator.attrgetter('cache'))
+ def get_pep(self, num):
+ """Retrieve text of a Python Enhancement Proposal"""
+ resource = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ try:
+ with urllib.request.urlopen(resource) as s:
+ return s.read()
+ except urllib.error.HTTPError:
+ return 'Not Found'
.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable