summary refs log tree commit diff
diff options
context:
space:
mode:
author	Jay Hutchinson <jlhutch@gmail.com>	2011-07-10 22:54:34 -0500
committer	Jay Hutchinson <jlhutch@gmail.com>	2011-07-10 22:54:34 -0500
commit	ecc361e79226ac98c260351f2f1976160b54c741 (patch)
tree	83f67f0178b356aeabd3bb4f7ab554e7c16dd696
parent	2614085ff43feb1c9609f1d3ebce43b35f8a46f4 (diff)
download	pylru-ecc361e79226ac98c260351f2f1976160b54c741.tar.gz
Cache methods keys(), values(), and items() now return iterators.
Added __iter__ method.
-rw-r--r--	README.txt	34
-rw-r--r--	pylru.py	38
-rw-r--r--	test.py	33
3 files changed, 51 insertions(+), 54 deletions(-)
diff --git a/README.txt b/README.txt
index d2466cb..a6cdf89 100644
--- a/README.txt
+++ b/README.txt
@@ -24,9 +24,16 @@ An LRU cache object has a dictionary like interface and can be used in the same
value = cache[key] # Lookup a value given its key
del cache[key] # Remove a value given its key
- cache.keys() # Returns the keys in the cache *
- cache.values() # Returns the values in the cache *
- cache.items() # Returns the key, value pairs in the cache *
+ cache.keys() # Return an iterator over the keys in the cache
+ cache.values() # Return an iterator over the values in the cache
+ cache.items() # Return an iterator over the (key, value) pairs in the
+ # cache.
+ #
+ # These calls have no effect on the cache order.
+ # The iterators iterate over their respective elements
+ # in the order of most recently used to least recently
+ # used. Caches support __iter__ so you can use them
+ # directly in a for loop to loop over the keys.
cache.size() # Returns the size of the cache
cache.size(x) # Changes the size of the cache. x MUST be greater than
@@ -37,7 +44,6 @@ An LRU cache object has a dictionary like interface and can be used in the same
cache.clear() # Remove all elements from the cache.
-* These calls have no effect on the cache order.
The lrucache takes an optional callback function as a second argument. Since the cache has a fixed size some operations, such as an insertion, may cause a key/value pair to be ejected. If the optional callback function is given it will be called when this occurs. For example::
@@ -53,7 +59,7 @@ The lrucache takes an optional callback function as a second argument. Since the
# the fixed cache size. But, not before the callback is called to let you
# know.
-Often a cache is used to speed up access to some other low latency object. If that object has a dictionary interface a convenience wrapper class provided by PyLRU can be used. This class takes as an argument the object you want to wrap and the cache size. It then creates an LRU cache for the object and automatically manages it. For example, imagine you have an object with a dictionary interface that reads/writes its values to and from a remote server. Let us call this object slowDict::
+Often a cache is used to speed up access to some other high latency object. If that object has a dictionary interface a convenience wrapper class provided by PyLRU can be used. This class takes as an argument the object you want to wrap and the cache size. It then creates an LRU cache for the object and automatically manages it. For example, imagine you have an object with a dictionary interface that reads/writes its values to and from a remote server. Let us call this object slowDict::
import pylru
@@ -63,9 +69,9 @@ Often a cache is used to speed up access to some other low latency object. If th
# Now cacheDict can be used just like slowDict, except all of the lookups
# are automatically cached for you using an LRU cache.
-By default lruwrap uses write-through semantics. For instance, in the above example insertions are updated in the cache and written through to slowDict immediatly. The cache and the underlying object are not allowed to get out of sync. So only lookup performace can be improved by the cache. lruwrap takes an optional third argument. If set to True write-back semantics will be used. Insertions will be updated to the cache. The underlying slowDict will automatically be updated only when a "dirty" key/value pair is ejected from the cache.
+By default lruwrap uses write-through semantics. For instance, in the above example insertions are updated in the cache and written through to slowDict immediately. The cache and the underlying object are not allowed to get out of sync. So only lookup performance can be improved by the cache. lruwrap takes an optional third argument. If set to True write-back semantics will be used. Insertions will be updated to the cache. The underlying slowDict will automatically be updated only when a "dirty" key/value pair is ejected from the cache.
-The programmer is responsible for one thing though. They MUST call sync() when they are finished. This ensures that the last of the "dirty" entries in the cache are written back::
+If write-back is used the programmer is responsible for one more thing. They MUST call sync() when they are finished. This ensures that the last of the "dirty" entries in the cache are written back::
import pylru
@@ -85,7 +91,7 @@ To help the programmer with this the lruwrap can be used in a with statement::
with pylru.lruwrap(slowDict, size, True) as cacheDict
# Use cacheDict, sync() is called automatically for you when leaving the
- # with statment block.
+ # with statement block.
PyLRU also provides a function decorator::
@@ -98,16 +104,4 @@ PyLRU also provides a function decorator::
# Now results of the square function are cached for future lookup.
-To iterate over the cache elements, use .items()::
- import pylru
-
- cache = pylru.lrucache(size, callback)
-
- # items() returns a list of key, value pairs without modifying order.
- for key, val in cache.items():
- print key, val
-
- # WARNING: while keys() does not modify order, the cache[key] call does!
- for key in cache.keys():
- print key, cache[key]
diff --git a/pylru.py b/pylru.py
index 55e4b67..27df250 100644
--- a/pylru.py
+++ b/pylru.py
@@ -178,23 +178,37 @@ class lrucache(object):
self.mtf(node)
self.head = node.next
+ def __iter__(self):
+
+ # Return an iterator that returns the keys in the cache in order from
+ # the most recently to least recently used. Does not modify the cache
+ # order.
+ for node in self.dli():
+ yield node.key
+
def items(self):
- # return the (key, value) pairs (from most recent to least)
- # without modifying the cache order
- return zip(self.keys(), self.values())
+ # Return an iterator that returns the (key, value) pairs in the cache
+ # in order from the most recently to least recently used. Does not
+ # modify the cache order.
+ for node in self.dli():
+ yield (node.key, node.obj)
def keys(self):
- # return the keys (from most recent to least) in the cache
- # does not modify the cache order
- return self.table.keys()
+ # Return an iterator that returns the keys in the cache in order from
+ # the most recently to least recently used. Does not modify the cache
+ # order.
+ for node in self.dli():
+ yield node.key
def values(self):
- # return the values in the cache (from most recent to least)
- # does not modify the cache order
- return [node.obj for node in self.table.values()]
+ # Return an iterator that returns the values in the cache in order from
+ # the most recently to least recently used. Does not modify the cache
+ # order.
+ for node in self.dli():
+ yield node.obj
def size(self, size=None):
@@ -260,6 +274,12 @@ class lrucache(object):
node.next.prev = node
node.prev.next = node
+ def dli(self):
+ node = self.head
+ for i in range(len(self)):
+ yield node
+ node = node.next
+
class lruwrap(object):
diff --git a/test.py b/test.py
index a6d0be3..8edb7a4 100644
--- a/test.py
+++ b/test.py
@@ -101,6 +101,14 @@ def testcache():
assert q == b.cache[::-1]
+ q2 = []
+ for x, y in q:
+ q2.append((x, y))
+
+ assert list(a.items()) == q2
+ assert zip(a.keys(), a.values()) == q2
+ assert list(a.keys()) == list(a)
+
a = lrucache(128)
b = simplelrucache(128)
@@ -158,30 +166,6 @@ def testDecorator():
assert square(x) == x*x
-def testItems():
- a = lrucache(128)
- b = simplelrucache(128)
-
- for i in range(1000):
- x = random.randint(0, 512)
- y = random.randint(0, 512)
- a[x] = y
- b[x] = y
-
- for k, v in a.items():
- assert k in a
- assert a[k] == v
- assert b[k] == v
-
- # ensure the order returned in items() is correct.
- items = a.items()
- for k, v in reversed(items):
- a[k] = v
-
- # test the order is returned correctly.
- assert items == a.items()
-
-
if __name__ == '__main__':
random.seed()
@@ -193,6 +177,5 @@ if __name__ == '__main__':
wraptest2()
wraptest3()
testDecorator()
- testItems()