path: root/Lib/collections/__init__.py
author     Ezio Melotti <ezio.melotti@gmail.com>  2011-03-15 06:03:08 +0200
committer  Ezio Melotti <ezio.melotti@gmail.com>  2011-03-15 06:03:08 +0200
commit     86816dab0c8ac12eeaaf017a49a03b04849b23d7 (patch)
tree       c2402b3a7e15a491ec51bc70a0341a1825c97f66 /Lib/collections/__init__.py
parent     b9eef2c0632615c2fb749466f4b070d5be899fd9 (diff)
parent     5fa03f709383c42df97d35f90570e909d2eb7afa (diff)
download   cpython-86816dab0c8ac12eeaaf017a49a03b04849b23d7.tar.gz
#11515: Merge with 3.2.
Diffstat (limited to 'Lib/collections/__init__.py')
-rw-r--r--  Lib/collections/__init__.py  1046
1 files changed, 1046 insertions, 0 deletions
diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py
new file mode 100644
index 0000000000..c324ce3bd6
--- /dev/null
+++ b/Lib/collections/__init__.py
@@ -0,0 +1,1046 @@
+__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
+ 'UserString', 'Counter', 'OrderedDict']
+
+# For backwards compatibility, continue to make the collections ABCs
+# available through the collections module.
+from collections.abc import *
+import collections.abc
+__all__ += collections.abc.__all__
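+# For example, older code that refers to collections.MutableMapping keeps
+# working; the star-import above makes that name resolve to
+# collections.abc.MutableMapping.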
+
+from _collections import deque, defaultdict
+from operator import itemgetter as _itemgetter
+from keyword import iskeyword as _iskeyword
+import sys as _sys
+import heapq as _heapq
+from weakref import proxy as _proxy
+from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
+from reprlib import recursive_repr as _recursive_repr
+
+################################################################################
+### OrderedDict
+################################################################################
+
+class _Link(object):
+ __slots__ = 'prev', 'next', 'key', '__weakref__'
+
+class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as for regular dictionaries.
+
+ # The internal self.__map dictionary maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # The sentinel is stored in self.__hardroot with a weakref proxy in self.__root.
+ # The prev/next links are weakref proxies (to prevent circular references).
+ # Individual links are kept alive by the hard reference in self.__map.
+ # Those hard references disappear when a key is deleted from an OrderedDict.
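+ # Illustrative sketch: after inserting 'a' then 'b', the circular list is
+ #     root <-> link_a <-> link_b <-> root        (root is the sentinel)
+ # so iteration follows root.next ('a', then 'b'), reversed iteration follows
+ # root.prev ('b', then 'a'), and self.__map is {'a': link_a, 'b': link_b}.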
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. Signature is the same as for
+ regular dictionaries, but keyword arguments are not recommended
+ because their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 argument, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__hardroot = _Link()
+ self.__root = root = _proxy(self.__hardroot)
+ root.prev = root.next = root
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value,
+ dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link which goes at the end of the linked
+ # list, and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ self.__map[key] = link = Link()
+ root = self.__root
+ last = root.prev
+ link.prev, link.next, link.key = last, root, key
+ last.next = link
+ root.prev = proxy(link)
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which is
+ # then removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link = self.__map.pop(key)
+ link_prev = link.prev
+ link_next = link.next
+ link_prev.next = link_next
+ link_next.prev = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ # Traverse the linked list in order.
+ root = self.__root
+ curr = root.next
+ while curr is not root:
+ yield curr.key
+ curr = curr.next
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ # Traverse the linked list in reverse order.
+ root = self.__root
+ curr = root.prev
+ while curr is not root:
+ yield curr.key
+ curr = curr.prev
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ root = self.__root
+ root.prev = root.next = root
+ self.__map.clear()
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root.prev
+ link_prev = link.prev
+ link_prev.next = root
+ root.prev = link_prev
+ else:
+ link = root.next
+ link_next = link.next
+ root.next = link_next
+ link_next.prev = root
+ key = link.key
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ def move_to_end(self, key, last=True):
+ '''Move an existing element to the end (or beginning if last==False).
+
+ Raises KeyError if the element does not exist.
+ When last=True, acts like a fast version of self[key]=self.pop(key).
+
+ '''
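+ # For example, given od = OrderedDict.fromkeys('abc'), od.move_to_end('a')
+ # reorders the keys to ['b', 'c', 'a'], and a following
+ # od.move_to_end('c', last=False) gives ['c', 'b', 'a'].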
+ link = self.__map[key]
+ link_prev = link.prev
+ link_next = link.next
+ link_prev.next = link_next
+ link_next.prev = link_prev
+ root = self.__root
+ if last:
+ last = root.prev
+ link.prev = last
+ link.next = root
+ last.next = root.prev = link
+ else:
+ first = root.next
+ link.prev = root
+ link.next = first
+ root.next = first.prev = link
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ tmp = self.__map, self.__root, self.__hardroot
+ del self.__map, self.__root, self.__hardroot
+ inst_dict = vars(self).copy()
+ self.__map, self.__root, self.__hardroot = tmp
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def __sizeof__(self):
+ sizeof = _sys.getsizeof
+ n = len(self) + 1 # number of links including root
+ size = sizeof(self.__dict__) # instance dictionary
+ size += sizeof(self.__map) * 2 # internal dict and inherited dict
+ size += sizeof(self.__hardroot) * n # link objects
+ size += sizeof(self.__root) * n # proxy objects
+ return size
+
+ update = __update = MutableMapping.update
+ keys = MutableMapping.keys
+ values = MutableMapping.values
+ items = MutableMapping.items
+ __ne__ = MutableMapping.__ne__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'OD.setdefault(k[,d]) -> OD.get(k,d), also set OD[k]=d if k not in OD'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ @_recursive_repr()
+ def __repr__(self):
+ 'od.__repr__() <==> repr(od)'
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, list(self.items()))
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+ and values equal to v (which defaults to None).
+
+ '''
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
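+ # For example, OrderedDict([('a', 1), ('b', 2)]) is not equal to
+ # OrderedDict([('b', 2), ('a', 1)]) because the insertion orders differ,
+ # yet both compare equal to the plain dict {'a': 1, 'b': 2}.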
+ if isinstance(other, OrderedDict):
+ return len(self)==len(other) and \
+ all(p==q for p, q in zip(self.items(), other.items()))
+ return dict.__eq__(self, other)
+
+
+################################################################################
+### namedtuple
+################################################################################
+
+def namedtuple(typename, field_names, verbose=False, rename=False):
+ """Returns a new subclass of tuple with named fields.
+
+ >>> Point = namedtuple('Point', 'x y')
+ >>> Point.__doc__ # docstring for the new class
+ 'Point(x, y)'
+ >>> p = Point(11, y=22) # instantiate with positional args or keywords
+ >>> p[0] + p[1] # indexable like a plain tuple
+ 33
+ >>> x, y = p # unpack like a regular tuple
+ >>> x, y
+ (11, 22)
+ >>> p.x + p.y # fields also accessible by name
+ 33
+ >>> d = p._asdict() # convert to a dictionary
+ >>> d['x']
+ 11
+ >>> Point(**d) # convert from a dictionary
+ Point(x=11, y=22)
+ >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
+ Point(x=100, y=22)
+
+ """
+
+ # Parse and validate the field names. Validation serves two purposes:
+ # generating informative error messages and preventing template injection attacks.
+ if isinstance(field_names, str):
+ field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
+ field_names = tuple(map(str, field_names))
+ if rename:
+ names = list(field_names)
+ seen = set()
+ for i, name in enumerate(names):
+ if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
+ or not name or name[0].isdigit() or name.startswith('_')
+ or name in seen):
+ names[i] = '_%d' % i
+ seen.add(name)
+ field_names = tuple(names)
+ for name in (typename,) + field_names:
+ if not all(c.isalnum() or c=='_' for c in name):
+ raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
+ if _iskeyword(name):
+ raise ValueError('Type names and field names cannot be a keyword: %r' % name)
+ if name[0].isdigit():
+ raise ValueError('Type names and field names cannot start with a number: %r' % name)
+ seen_names = set()
+ for name in field_names:
+ if name.startswith('_') and not rename:
+ raise ValueError('Field names cannot start with an underscore: %r' % name)
+ if name in seen_names:
+ raise ValueError('Encountered duplicate field name: %r' % name)
+ seen_names.add(name)
+
+ # Create and fill-in the class template
+ numfields = len(field_names)
+ argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
+ reprtxt = ', '.join('%s=%%r' % name for name in field_names)
+ template = '''class %(typename)s(tuple):
+ '%(typename)s(%(argtxt)s)' \n
+ __slots__ = () \n
+ _fields = %(field_names)r \n
+ def __new__(_cls, %(argtxt)s):
+ 'Create new instance of %(typename)s(%(argtxt)s)'
+ return _tuple.__new__(_cls, (%(argtxt)s)) \n
+ @classmethod
+ def _make(cls, iterable, new=tuple.__new__, len=len):
+ 'Make a new %(typename)s object from a sequence or iterable'
+ result = new(cls, iterable)
+ if len(result) != %(numfields)d:
+ raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
+ return result \n
+ def __repr__(self):
+ 'Return a nicely formatted representation string'
+ return self.__class__.__name__ + '(%(reprtxt)s)' %% self \n
+ def _asdict(self):
+ 'Return a new OrderedDict which maps field names to their values'
+ return OrderedDict(zip(self._fields, self)) \n
+ def _replace(_self, **kwds):
+ 'Return a new %(typename)s object replacing specified fields with new values'
+ result = _self._make(map(kwds.pop, %(field_names)r, _self))
+ if kwds:
+ raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
+ return result \n
+ def __getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return tuple(self) \n\n''' % locals()
+ for i, name in enumerate(field_names):
+ template += " %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
+ if verbose:
+ print(template)
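+ # For example (illustrative), namedtuple('Point', 'x y') produces a template
+ # whose __new__ reads "def __new__(_cls, x, y): return _tuple.__new__(_cls, (x, y))"
+ # and which ends with "x = _property(_itemgetter(0), ...)" and
+ # "y = _property(_itemgetter(1), ...)" field aliases.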
+
+ # Execute the template string in a temporary namespace and
+ # support tracing utilities by setting a value for frame.f_globals['__name__']
+ namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
+ OrderedDict=OrderedDict, _property=property, _tuple=tuple)
+ try:
+ exec(template, namespace)
+ except SyntaxError as e:
+ raise SyntaxError(e.msg + ':\n\n' + template)
+ result = namespace[typename]
+
+ # For pickling to work, the __module__ variable needs to be set to the frame
+ # where the named tuple is created. Bypass this step in environments where
+ # sys._getframe is not defined (Jython for example) or where it is not
+ # defined for arguments greater than 0 (IronPython).
+ try:
+ result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+
+ return result
+
+
+########################################################################
+### Counter
+########################################################################
+
+def _count_elements(mapping, iterable):
+ 'Tally elements from the iterable.'
+ mapping_get = mapping.get
+ for elem in iterable:
+ mapping[elem] = mapping_get(elem, 0) + 1
+
+try: # Load C helper function if available
+ from _collections import _count_elements
+except ImportError:
+ pass
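+# Illustrative behavior of the helper: starting from d = {}, calling
+# _count_elements(d, 'abca') leaves d == {'a': 2, 'b': 1, 'c': 1}.
+# Counter.update() uses this path for non-mapping iterables.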
+
+class Counter(dict):
+ '''Dict subclass for counting hashable items. Sometimes called a bag
+ or multiset. Elements are stored as dictionary keys and their counts
+ are stored as dictionary values.
+
+ >>> c = Counter('abcdeabcdabcaba') # count elements from a string
+
+ >>> c.most_common(3) # three most common elements
+ [('a', 5), ('b', 4), ('c', 3)]
+ >>> sorted(c) # list all unique elements
+ ['a', 'b', 'c', 'd', 'e']
+ >>> ''.join(sorted(c.elements())) # list elements with repetitions
+ 'aaaaabbbbcccdde'
+ >>> sum(c.values()) # total of all counts
+ 15
+
+ >>> c['a'] # count of letter 'a'
+ 5
+ >>> for elem in 'shazam': # update counts from an iterable
+ ... c[elem] += 1 # by adding 1 to each element's count
+ >>> c['a'] # now there are seven 'a'
+ 7
+ >>> del c['b'] # remove all 'b'
+ >>> c['b'] # now there are zero 'b'
+ 0
+
+ >>> d = Counter('simsalabim') # make another counter
+ >>> c.update(d) # add in the second counter
+ >>> c['a'] # now there are nine 'a'
+ 9
+
+ >>> c.clear() # empty the counter
+ >>> c
+ Counter()
+
+ Note: If a count is set to zero or reduced to zero, it will remain
+ in the counter until the entry is deleted or the counter is cleared:
+
+ >>> c = Counter('aaabbc')
+ >>> c['b'] -= 2 # reduce the count of 'b' by two
+ >>> c.most_common() # 'b' is still in, but its count is zero
+ [('a', 3), ('c', 1), ('b', 0)]
+
+ '''
+ # References:
+ # http://en.wikipedia.org/wiki/Multiset
+ # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
+ # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
+ # http://code.activestate.com/recipes/259174/
+ # Knuth, TAOCP Vol. II section 4.6.3
+
+ def __init__(self, iterable=None, **kwds):
+ '''Create a new, empty Counter object. If given, count elements
+ from an input iterable, or initialize the counts from another mapping
+ of elements to their counts.
+
+ >>> c = Counter() # a new, empty counter
+ >>> c = Counter('gallahad') # a new counter from an iterable
+ >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
+ >>> c = Counter(a=4, b=2) # a new counter from keyword args
+
+ '''
+ super().__init__()
+ self.update(iterable, **kwds)
+
+ def __missing__(self, key):
+ 'The count of elements not in the Counter is zero.'
+ # Needed so that self[missing_item] does not raise KeyError
+ return 0
+
+ def most_common(self, n=None):
+ '''List the n most common elements and their counts from the most
+ common to the least. If n is None, then list all element counts.
+
+ >>> Counter('abcdeabcdabcaba').most_common(3)
+ [('a', 5), ('b', 4), ('c', 3)]
+
+ '''
+ # Emulate Bag.sortedByCount from Smalltalk
+ if n is None:
+ return sorted(self.items(), key=_itemgetter(1), reverse=True)
+ return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
+
+ def elements(self):
+ '''Iterator over elements repeating each as many times as its count.
+
+ >>> c = Counter('ABCABC')
+ >>> sorted(c.elements())
+ ['A', 'A', 'B', 'B', 'C', 'C']
+
+ # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
+ >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
+ >>> product = 1
+ >>> for factor in prime_factors.elements(): # loop over factors
+ ... product *= factor # and multiply them
+ >>> product
+ 1836
+
+ Note, if an element's count has been set to zero or is a negative
+ number, elements() will ignore it.
+
+ '''
+ # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
+ return _chain.from_iterable(_starmap(_repeat, self.items()))
+
+ # Override dict methods where necessary
+
+ @classmethod
+ def fromkeys(cls, iterable, v=None):
+ # There is no equivalent method for counters because setting v=1
+ # means that no element can have a count greater than one.
+ raise NotImplementedError(
+ 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
+
+ def update(self, iterable=None, **kwds):
+ '''Like dict.update() but add counts instead of replacing them.
+
+ Source can be an iterable, a dictionary, or another Counter instance.
+
+ >>> c = Counter('which')
+ >>> c.update('witch') # add elements from another iterable
+ >>> d = Counter('watch')
+ >>> c.update(d) # add elements from another counter
+ >>> c['h'] # four 'h' in which, witch, and watch
+ 4
+
+ '''
+ # The regular dict.update() operation makes no sense here because the
+ # replace behavior results in some of the original untouched counts
+ # being mixed in with all of the other counts for a mishmash that
+ # doesn't have a straightforward interpretation in most counting
+ # contexts. Instead, we implement straight addition. Both the inputs
+ # and outputs are allowed to contain zero and negative counts.
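+ # For example, updating Counter(a=1) with Counter(a=-2) yields Counter({'a': -1});
+ # counts are not clamped at zero here, unlike the + and - operators below.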
+
+ if iterable is not None:
+ if isinstance(iterable, Mapping):
+ if self:
+ self_get = self.get
+ for elem, count in iterable.items():
+ self[elem] = count + self_get(elem, 0)
+ else:
+ super().update(iterable) # fast path when counter is empty
+ else:
+ _count_elements(self, iterable)
+ if kwds:
+ self.update(kwds)
+
+ def subtract(self, iterable=None, **kwds):
+ '''Like dict.update() but subtracts counts instead of replacing them.
+ Counts can be reduced below zero. Both the inputs and outputs are
+ allowed to contain zero and negative counts.
+
+ Source can be an iterable, a dictionary, or another Counter instance.
+
+ >>> c = Counter('which')
+ >>> c.subtract('witch') # subtract elements from another iterable
+ >>> c.subtract(Counter('watch')) # subtract elements from another counter
+ >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
+ 0
+ >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
+ -1
+
+ '''
+ if iterable is not None:
+ self_get = self.get
+ if isinstance(iterable, Mapping):
+ for elem, count in iterable.items():
+ self[elem] = self_get(elem, 0) - count
+ else:
+ for elem in iterable:
+ self[elem] = self_get(elem, 0) - 1
+ if kwds:
+ self.subtract(kwds)
+
+ def copy(self):
+ 'Like dict.copy() but returns a Counter instance instead of a dict.'
+ return Counter(self)
+
+ def __reduce__(self):
+ return self.__class__, (dict(self),)
+
+ def __delitem__(self, elem):
+ 'Like dict.__delitem__() but does not raise KeyError for missing values.'
+ if elem in self:
+ super().__delitem__(elem)
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % self.__class__.__name__
+ items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
+ return '%s({%s})' % (self.__class__.__name__, items)
+
+ # Multiset-style mathematical operations discussed in:
+ # Knuth TAOCP Volume II section 4.6.3 exercise 19
+ # and at http://en.wikipedia.org/wiki/Multiset
+ #
+ # Outputs guaranteed to only include positive counts.
+ #
+ # To strip negative and zero counts, add-in an empty counter:
+ # c += Counter()
+
+ def __add__(self, other):
+ '''Add counts from two counters.
+
+ >>> Counter('abbb') + Counter('bcc')
+ Counter({'b': 4, 'c': 2, 'a': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ for elem in set(self) | set(other):
+ newcount = self[elem] + other[elem]
+ if newcount > 0:
+ result[elem] = newcount
+ return result
+
+ def __sub__(self, other):
+ '''Subtract counts, but keep only results with positive counts.
+
+ >>> Counter('abbbc') - Counter('bccd')
+ Counter({'b': 2, 'a': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ for elem in set(self) | set(other):
+ newcount = self[elem] - other[elem]
+ if newcount > 0:
+ result[elem] = newcount
+ return result
+
+ def __or__(self, other):
+ '''Union is the maximum of the counts in either of the input counters.
+
+ >>> Counter('abbb') | Counter('bcc')
+ Counter({'b': 3, 'c': 2, 'a': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ for elem in set(self) | set(other):
+ p, q = self[elem], other[elem]
+ newcount = q if p < q else p
+ if newcount > 0:
+ result[elem] = newcount
+ return result
+
+ def __and__(self, other):
+ ''' Intersection is the minimum of corresponding counts.
+
+ >>> Counter('abbb') & Counter('bcc')
+ Counter({'b': 1})
+
+ '''
+ if not isinstance(other, Counter):
+ return NotImplemented
+ result = Counter()
+ if len(self) < len(other):
+ self, other = other, self
+ for elem in filter(self.__contains__, other):
+ p, q = self[elem], other[elem]
+ newcount = p if p < q else q
+ if newcount > 0:
+ result[elem] = newcount
+ return result
+
+
+########################################################################
+### ChainMap (helper for configparser and string.Template)
+########################################################################
+
+class ChainMap(MutableMapping):
+ ''' A ChainMap groups multiple dicts (or other mappings) together
+ to create a single, updateable view.
+
+ The underlying mappings are stored in a list. That list is public and can
+ be accessed or updated using the *maps* attribute. There is no other state.
+
+ Lookups search the underlying mappings successively until a key is found.
+ In contrast, writes, updates, and deletions only operate on the first
+ mapping.
+
+ '''
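+ # A small illustrative example of the semantics described above:
+ #     cm = ChainMap({'x': 1}, {'x': 10, 'y': 2})
+ #     cm['x'] -> 1 (first mapping wins); cm['y'] -> 2 (found in a later map)
+ #     cm['z'] = 3 writes only to maps[0], leaving the second dict untouched.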
+
+ def __init__(self, *maps):
+ '''Initialize a ChainMap by setting *maps* to the given mappings.
+ If no mappings are provided, a single empty dictionary is used.
+
+ '''
+ self.maps = list(maps) or [{}] # always at least one map
+
+ def __missing__(self, key):
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ for mapping in self.maps:
+ try:
+ return mapping[key] # can't use 'key in mapping' with defaultdict
+ except KeyError:
+ pass
+ return self.__missing__(key) # support subclasses that define __missing__
+
+ def get(self, key, default=None):
+ return self[key] if key in self else default
+
+ def __len__(self):
+ return len(set().union(*self.maps)) # reuses stored hash values if possible
+
+ def __iter__(self):
+ return iter(set().union(*self.maps))
+
+ def __contains__(self, key):
+ return any(key in m for m in self.maps)
+
+ def __bool__(self):
+ return any(self.maps)
+
+ @_recursive_repr()
+ def __repr__(self):
+ return '{0.__class__.__name__}({1})'.format(
+ self, ', '.join(map(repr, self.maps)))
+
+ @classmethod
+ def fromkeys(cls, iterable, *args):
+ 'Create a ChainMap with a single dict created from the iterable.'
+ return cls(dict.fromkeys(iterable, *args))
+
+ def copy(self):
+ 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
+ return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+ __copy__ = copy
+
+ def new_child(self): # like Django's Context.push()
+ 'New ChainMap with a new dict followed by all previous maps.'
+ return self.__class__({}, *self.maps)
+
+ @property
+ def parents(self): # like Django's Context.pop()
+ 'New ChainMap from maps[1:].'
+ return self.__class__(*self.maps[1:])
+
+ def __setitem__(self, key, value):
+ self.maps[0][key] = value
+
+ def __delitem__(self, key):
+ try:
+ del self.maps[0][key]
+ except KeyError:
+ raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+ def popitem(self):
+ 'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
+ try:
+ return self.maps[0].popitem()
+ except KeyError:
+ raise KeyError('No keys found in the first mapping.')
+
+ def pop(self, key, *args):
+ 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
+ try:
+ return self.maps[0].pop(key, *args)
+ except KeyError:
+ raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+ def clear(self):
+ 'Clear maps[0], leaving maps[1:] intact.'
+ self.maps[0].clear()
+
+
+################################################################################
+### UserDict
+################################################################################
+
+class UserDict(MutableMapping):
+
+ # Start by filling-out the abstract methods
+ def __init__(self, dict=None, **kwargs):
+ self.data = {}
+ if dict is not None:
+ self.update(dict)
+ if len(kwargs):
+ self.update(kwargs)
+ def __len__(self): return len(self.data)
+ def __getitem__(self, key):
+ if key in self.data:
+ return self.data[key]
+ if hasattr(self.__class__, "__missing__"):
+ return self.__class__.__missing__(self, key)
+ raise KeyError(key)
+ def __setitem__(self, key, item): self.data[key] = item
+ def __delitem__(self, key): del self.data[key]
+ def __iter__(self):
+ return iter(self.data)
+
+ # Modify __contains__ to work correctly when __missing__ is present
+ def __contains__(self, key):
+ return key in self.data
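+ # For example (illustrative), in a subclass whose __missing__ returns a default,
+ # d[absent_key] yields that default while "absent_key in d" remains False,
+ # because membership is checked against self.data only.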
+
+ # Now, add the methods in dicts but not in MutableMapping
+ def __repr__(self): return repr(self.data)
+ def copy(self):
+ if self.__class__ is UserDict:
+ return UserDict(self.data.copy())
+ import copy
+ data = self.data
+ try:
+ self.data = {}
+ c = copy.copy(self)
+ finally:
+ self.data = data
+ c.update(self)
+ return c
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+
+
+################################################################################
+### UserList
+################################################################################
+
+class UserList(MutableSequence):
+ """A more or less complete user-defined wrapper around list objects."""
+ def __init__(self, initlist=None):
+ self.data = []
+ if initlist is not None:
+ # XXX should this accept an arbitrary sequence?
+ if type(initlist) == type(self.data):
+ self.data[:] = initlist
+ elif isinstance(initlist, UserList):
+ self.data[:] = initlist.data[:]
+ else:
+ self.data = list(initlist)
+ def __repr__(self): return repr(self.data)
+ def __lt__(self, other): return self.data < self.__cast(other)
+ def __le__(self, other): return self.data <= self.__cast(other)
+ def __eq__(self, other): return self.data == self.__cast(other)
+ def __ne__(self, other): return self.data != self.__cast(other)
+ def __gt__(self, other): return self.data > self.__cast(other)
+ def __ge__(self, other): return self.data >= self.__cast(other)
+ def __cast(self, other):
+ return other.data if isinstance(other, UserList) else other
+ def __contains__(self, item): return item in self.data
+ def __len__(self): return len(self.data)
+ def __getitem__(self, i): return self.data[i]
+ def __setitem__(self, i, item): self.data[i] = item
+ def __delitem__(self, i): del self.data[i]
+ def __add__(self, other):
+ if isinstance(other, UserList):
+ return self.__class__(self.data + other.data)
+ elif isinstance(other, type(self.data)):
+ return self.__class__(self.data + other)
+ return self.__class__(self.data + list(other))
+ def __radd__(self, other):
+ if isinstance(other, UserList):
+ return self.__class__(other.data + self.data)
+ elif isinstance(other, type(self.data)):
+ return self.__class__(other + self.data)
+ return self.__class__(list(other) + self.data)
+ def __iadd__(self, other):
+ if isinstance(other, UserList):
+ self.data += other.data
+ elif isinstance(other, type(self.data)):
+ self.data += other
+ else:
+ self.data += list(other)
+ return self
+ def __mul__(self, n):
+ return self.__class__(self.data*n)
+ __rmul__ = __mul__
+ def __imul__(self, n):
+ self.data *= n
+ return self
+ def append(self, item): self.data.append(item)
+ def insert(self, i, item): self.data.insert(i, item)
+ def pop(self, i=-1): return self.data.pop(i)
+ def remove(self, item): self.data.remove(item)
+ def clear(self): self.data.clear()
+ def copy(self): return self.data.copy()
+ def count(self, item): return self.data.count(item)
+ def index(self, item, *args): return self.data.index(item, *args)
+ def reverse(self): self.data.reverse()
+ def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
+ def extend(self, other):
+ if isinstance(other, UserList):
+ self.data.extend(other.data)
+ else:
+ self.data.extend(other)
+
+
+
+################################################################################
+### UserString
+################################################################################
+
+class UserString(Sequence):
+ def __init__(self, seq):
+ if isinstance(seq, str):
+ self.data = seq
+ elif isinstance(seq, UserString):
+ self.data = seq.data[:]
+ else:
+ self.data = str(seq)
+ def __str__(self): return str(self.data)
+ def __repr__(self): return repr(self.data)
+ def __int__(self): return int(self.data)
+ def __float__(self): return float(self.data)
+ def __complex__(self): return complex(self.data)
+ def __hash__(self): return hash(self.data)
+
+ def __eq__(self, string):
+ if isinstance(string, UserString):
+ return self.data == string.data
+ return self.data == string
+ def __ne__(self, string):
+ if isinstance(string, UserString):
+ return self.data != string.data
+ return self.data != string
+ def __lt__(self, string):
+ if isinstance(string, UserString):
+ return self.data < string.data
+ return self.data < string
+ def __le__(self, string):
+ if isinstance(string, UserString):
+ return self.data <= string.data
+ return self.data <= string
+ def __gt__(self, string):
+ if isinstance(string, UserString):
+ return self.data > string.data
+ return self.data > string
+ def __ge__(self, string):
+ if isinstance(string, UserString):
+ return self.data >= string.data
+ return self.data >= string
+
+ def __contains__(self, char):
+ if isinstance(char, UserString):
+ char = char.data
+ return char in self.data
+
+ def __len__(self): return len(self.data)
+ def __getitem__(self, index): return self.__class__(self.data[index])
+ def __add__(self, other):
+ if isinstance(other, UserString):
+ return self.__class__(self.data + other.data)
+ elif isinstance(other, str):
+ return self.__class__(self.data + other)
+ return self.__class__(self.data + str(other))
+ def __radd__(self, other):
+ if isinstance(other, str):
+ return self.__class__(other + self.data)
+ return self.__class__(str(other) + self.data)
+ def __mul__(self, n):
+ return self.__class__(self.data*n)
+ __rmul__ = __mul__
+ def __mod__(self, args):
+ return self.__class__(self.data % args)
+
+ # the following methods are defined in alphabetical order:
+ def capitalize(self): return self.__class__(self.data.capitalize())
+ def center(self, width, *args):
+ return self.__class__(self.data.center(width, *args))
+ def count(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, UserString):
+ sub = sub.data
+ return self.data.count(sub, start, end)
+ def encode(self, encoding=None, errors=None): # XXX improve this?
+ if encoding:
+ if errors:
+ return self.__class__(self.data.encode(encoding, errors))
+ return self.__class__(self.data.encode(encoding))
+ return self.__class__(self.data.encode())
+ def endswith(self, suffix, start=0, end=_sys.maxsize):
+ return self.data.endswith(suffix, start, end)
+ def expandtabs(self, tabsize=8):
+ return self.__class__(self.data.expandtabs(tabsize))
+ def find(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, UserString):
+ sub = sub.data
+ return self.data.find(sub, start, end)
+ def format(self, *args, **kwds):
+ return self.data.format(*args, **kwds)
+ def index(self, sub, start=0, end=_sys.maxsize):
+ return self.data.index(sub, start, end)
+ def isalpha(self): return self.data.isalpha()
+ def isalnum(self): return self.data.isalnum()
+ def isdecimal(self): return self.data.isdecimal()
+ def isdigit(self): return self.data.isdigit()
+ def isidentifier(self): return self.data.isidentifier()
+ def islower(self): return self.data.islower()
+ def isnumeric(self): return self.data.isnumeric()
+ def isspace(self): return self.data.isspace()
+ def istitle(self): return self.data.istitle()
+ def isupper(self): return self.data.isupper()
+ def join(self, seq): return self.data.join(seq)
+ def ljust(self, width, *args):
+ return self.__class__(self.data.ljust(width, *args))
+ def lower(self): return self.__class__(self.data.lower())
+ def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+ def partition(self, sep):
+ return self.data.partition(sep)
+ def replace(self, old, new, maxsplit=-1):
+ if isinstance(old, UserString):
+ old = old.data
+ if isinstance(new, UserString):
+ new = new.data
+ return self.__class__(self.data.replace(old, new, maxsplit))
+ def rfind(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, UserString):
+ sub = sub.data
+ return self.data.rfind(sub, start, end)
+ def rindex(self, sub, start=0, end=_sys.maxsize):
+ return self.data.rindex(sub, start, end)
+ def rjust(self, width, *args):
+ return self.__class__(self.data.rjust(width, *args))
+ def rpartition(self, sep):
+ return self.data.rpartition(sep)
+ def rstrip(self, chars=None):
+ return self.__class__(self.data.rstrip(chars))
+ def split(self, sep=None, maxsplit=-1):
+ return self.data.split(sep, maxsplit)
+ def rsplit(self, sep=None, maxsplit=-1):
+ return self.data.rsplit(sep, maxsplit)
+ def splitlines(self, keepends=0): return self.data.splitlines(keepends)
+ def startswith(self, prefix, start=0, end=_sys.maxsize):
+ return self.data.startswith(prefix, start, end)
+ def strip(self, chars=None): return self.__class__(self.data.strip(chars))
+ def swapcase(self): return self.__class__(self.data.swapcase())
+ def title(self): return self.__class__(self.data.title())
+ def translate(self, *args):
+ return self.__class__(self.data.translate(*args))
+ def upper(self): return self.__class__(self.data.upper())
+ def zfill(self, width): return self.__class__(self.data.zfill(width))
+
+
+
+################################################################################
+### Simple tests
+################################################################################
+
+if __name__ == '__main__':
+ # verify that instances can be pickled
+ from pickle import loads, dumps
+ Point = namedtuple('Point', 'x, y', True)
+ p = Point(x=10, y=20)
+ assert p == loads(dumps(p))
+
+ # test and demonstrate ability to override methods
+ class Point(namedtuple('Point', 'x y')):
+ __slots__ = ()
+ @property
+ def hypot(self):
+ return (self.x ** 2 + self.y ** 2) ** 0.5
+ def __str__(self):
+ return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
+
+ for p in Point(3, 4), Point(14, 5/7.):
+ print(p)
+
+ class Point(namedtuple('Point', 'x y')):
+ 'Point class with optimized _make() and _replace() without error-checking'
+ __slots__ = ()
+ _make = classmethod(tuple.__new__)
+ def _replace(self, _map=map, **kwds):
+ return self._make(_map(kwds.get, ('x', 'y'), self))
+
+ print(Point(11, 22)._replace(x=100))
+
+ Point3D = namedtuple('Point3D', Point._fields + ('z',))
+ print(Point3D.__doc__)
+
+ import doctest
+ TestResults = namedtuple('TestResults', 'failed attempted')
+ print(TestResults(*doctest.testmod()))