author     R. Tyler Ballance <tyler@monkeypox.org>  2009-08-14 15:11:24 -0700
committer  R. Tyler Ballance <tyler@monkeypox.org>  2009-08-14 15:11:24 -0700
commit     988f6da0fc7c211f654352ec1453f0ef168da7cf (patch)
tree       50ab6734ef1cacb71ff2ba30da76a3cfce51365e /cheetah/Utils
parent     f8c53e6fdaf28e1461456bf844c59a10e15bee68 (diff)
parent     53144fffd7754476b8b866b7b52fa9faab1548e4 (diff)
download   python-cheetah-988f6da0fc7c211f654352ec1453f0ef168da7cf.tar.gz
Merge branch 'next' into performance
Left some rename conflicts partially unresolved; need to move src/c/ to cheetah/c/ in the next commit.

Conflicts:
    SetupConfig.py
    cheetah/_namemapper.c
    src/_namemapper.c
    src/c/_namemapper.c
Diffstat (limited to 'cheetah/Utils')
-rw-r--r--  cheetah/Utils/Indenter.py        123
-rw-r--r--  cheetah/Utils/Misc.py             81
-rw-r--r--  cheetah/Utils/VerifyType.py       83
-rw-r--r--  cheetah/Utils/WebInputMixin.py   102
-rw-r--r--  cheetah/Utils/__init__.py          1
-rw-r--r--  cheetah/Utils/htmlDecode.py       14
-rw-r--r--  cheetah/Utils/htmlEncode.py       21
-rw-r--r--  cheetah/Utils/memcache.py        624
-rw-r--r--  cheetah/Utils/statprof.py        304
9 files changed, 1353 insertions, 0 deletions
diff --git a/cheetah/Utils/Indenter.py b/cheetah/Utils/Indenter.py
new file mode 100644
index 0000000..52c142d
--- /dev/null
+++ b/cheetah/Utils/Indenter.py
@@ -0,0 +1,123 @@
+"""
+Indentation maker.
+@@TR: this code is unsupported and largely undocumented ...
+
+This version is based directly on code by Robert Kuzelj
+<robert_kuzelj@yahoo.com> and uses his directive syntax. Some classes and
+attributes have been renamed. Indentation is output via
+$self._CHEETAH__indenter.indent() to prevent '_indenter' from being looked up
+on the searchList and a different object being found. The directive syntax will
+soon be changed somewhat.
+"""
+
+import re
+import sys
+
+def indentize(source):
+ return IndentProcessor().process(source)
+
+class IndentProcessor(object):
+ """Preprocess #indent tags."""
+ LINE_SEP = '\n'
+ ARGS = "args"
+ INDENT_DIR = re.compile(r'[ \t]*#indent[ \t]*(?P<args>.*)')
+ DIRECTIVE = re.compile(r"[ \t]*#")
+ WS = "ws"
+ WHITESPACES = re.compile(r"(?P<ws>[ \t]*)")
+
+ INC = "++"
+ DEC = "--"
+
+ SET = "="
+ CHAR = "char"
+
+ ON = "on"
+ OFF = "off"
+
+ PUSH = "push"
+ POP = "pop"
+
+ def process(self, _txt):
+ result = []
+
+ for line in _txt.splitlines():
+ match = self.INDENT_DIR.match(line)
+ if match:
+ #is indention directive
+ args = match.group(self.ARGS).strip()
+ if args == self.ON:
+ line = "#silent $self._CHEETAH__indenter.on()"
+ elif args == self.OFF:
+ line = "#silent $self._CHEETAH__indenter.off()"
+ elif args == self.INC:
+ line = "#silent $self._CHEETAH__indenter.inc()"
+ elif args == self.DEC:
+ line = "#silent $self._CHEETAH__indenter.dec()"
+ elif args.startswith(self.SET):
+ level = int(args[1:])
+ line = "#silent $self._CHEETAH__indenter.setLevel(%(level)d)" % {"level":level}
+ elif args.startswith('chars'):
+ self.indentChars = eval(args.split('=')[1])
+                line = "#silent $self._CHEETAH__indenter.setChar(%(chars)r)" % {"chars": self.indentChars}
+ elif args.startswith(self.PUSH):
+ line = "#silent $self._CHEETAH__indenter.push()"
+ elif args.startswith(self.POP):
+ line = "#silent $self._CHEETAH__indenter.pop()"
+ else:
+ match = self.DIRECTIVE.match(line)
+ if not match:
+ #is not another directive
+ match = self.WHITESPACES.match(line)
+ if match:
+ size = len(match.group("ws").expandtabs(4))
+ line = ("${self._CHEETAH__indenter.indent(%(size)d)}" % {"size":size}) + line.lstrip()
+ else:
+ line = "${self._CHEETAH__indenter.indent(0)}" + line
+ result.append(line)
+
+ return self.LINE_SEP.join(result)
+
+class Indenter(object):
+ """
+ A class that keeps track of the current indentation level.
+ .indent() returns the appropriate amount of indentation.
+ """
+ On = 1
+ Level = 0
+ Chars = ' '
+ LevelStack = []
+
+ def on(self):
+ self.On = 1
+ def off(self):
+ self.On = 0
+ def inc(self):
+ self.Level += 1
+ def dec(self):
+        """decrement can only be applied to values greater than zero;
+        values below zero don't make any sense at all!"""
+ if self.Level > 0:
+ self.Level -= 1
+ def push(self):
+ self.LevelStack.append(self.Level)
+ def pop(self):
+        """the level stack cannot underflow; popping an empty stack
+        simply sets the level to 0!"""
+ if len(self.LevelStack) > 0:
+ self.Level = self.LevelStack.pop()
+ else:
+ self.Level = 0
+ def setLevel(self, _level):
+        """the level can't be less than zero; any attempt to set it
+        below zero sets it to zero automatically!"""
+ if _level < 0:
+ self.Level = 0
+ else:
+ self.Level = _level
+ def setChar(self, _chars):
+ self.Chars = _chars
+ def indent(self, _default=0):
+ if self.On:
+ return self.Chars * self.Level
+ return " " * _default
+
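A minimal usage sketch of the new preprocessor and runtime indenter, assuming the module is importable as Cheetah.Utils.Indenter; the template fragment is made up for illustration:

    from Cheetah.Utils.Indenter import indentize, Indenter

    src = "#indent on\n#indent ++\nhello world\n#indent --"
    # Each #indent directive becomes a #silent call on the runtime indenter,
    # and every ordinary line is prefixed with ${self._CHEETAH__indenter.indent(N)}.
    print(indentize(src))

    ind = Indenter()
    ind.setChar('    ')   # indent in units of four spaces
    ind.inc()             # level 1
    assert ind.indent() == '    '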
diff --git a/cheetah/Utils/Misc.py b/cheetah/Utils/Misc.py
new file mode 100644
index 0000000..6ff5bb2
--- /dev/null
+++ b/cheetah/Utils/Misc.py
@@ -0,0 +1,81 @@
+# $Id: Misc.py,v 1.8 2005/11/02 22:26:08 tavis_rudd Exp $
+"""Miscellaneous functions/objects used by Cheetah but also useful standalone.
+
+Meta-Data
+================================================================================
+Author: Mike Orr <iron@mso.oz.net>
+License: This software is released for unlimited distribution under the
+ terms of the MIT license. See the LICENSE file.
+Version: $Revision: 1.8 $
+Start Date: 2001/11/07
+Last Revision Date: $Date: 2005/11/02 22:26:08 $
+"""
+__author__ = "Mike Orr <iron@mso.oz.net>"
+__revision__ = "$Revision: 1.8 $"[11:-2]
+
+import os # Used in mkdirsWithPyInitFile.
+import types # Used in useOrRaise.
+import sys # Used in die.
+
+##################################################
+## MISCELLANEOUS FUNCTIONS
+
+def die(reason):
+ sys.stderr.write(reason + '\n')
+ sys.exit(1)
+
+def useOrRaise(thing, errmsg=''):
+ """Raise 'thing' if it's a subclass of Exception. Otherwise return it.
+
+ Called by: Cheetah.Servlet.cgiImport()
+ """
+    if isinstance(thing, (types.ClassType, type)) and issubclass(thing, Exception):
+ raise thing(errmsg)
+ return thing
+
+
+def checkKeywords(dic, legalKeywords, what='argument'):
+ """Verify no illegal keyword arguments were passed to a function.
+
+ in : dic, dictionary (**kw in the calling routine).
+ legalKeywords, list of strings, the keywords that are allowed.
+ what, string, suffix for error message (see function source).
+ out: None.
+ exc: TypeError if 'dic' contains a key not in 'legalKeywords'.
+ called by: Cheetah.Template.__init__()
+ """
+ # XXX legalKeywords could be a set when sets get added to Python.
+ for k in dic.keys(): # Can be dic.iterkeys() if Python >= 2.2.
+ if k not in legalKeywords:
+ raise TypeError("'%s' is not a valid %s" % (k, what))
+
+
+def removeFromList(list_, *elements):
+    """Same as list_.remove(each element) but don't raise an error if
+ element is missing. Modifies 'list_' in place! Returns None.
+ """
+ for elm in elements:
+ try:
+ list_.remove(elm)
+ except ValueError:
+ pass
+
+
+def mkdirsWithPyInitFiles(path):
+ """Same as os.makedirs (mkdir 'path' and all missing parent directories)
+ but also puts a Python '__init__.py' file in every directory it
+ creates. Does nothing (without creating an '__init__.py' file) if the
+ directory already exists.
+ """
+ dir, fil = os.path.split(path)
+ if dir and not os.path.exists(dir):
+ mkdirsWithPyInitFiles(dir)
+ if not os.path.exists(path):
+ os.mkdir(path)
+ init = os.path.join(path, "__init__.py")
+ f = open(init, 'w') # Open and close to produce empty file.
+ f.close()
+
+
+
+# vim: shiftwidth=4 tabstop=4 expandtab
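A short sketch of how these helpers are meant to be used, assuming they are importable as Cheetah.Utils.Misc; the keyword names and directory path are placeholders:

    from Cheetah.Utils.Misc import checkKeywords, removeFromList, mkdirsWithPyInitFiles

    def configure(**kw):
        # raises TypeError for anything other than the two listed keywords
        checkKeywords(kw, ['color', 'size'], 'configure() keyword')
        return kw

    configure(color='red')             # fine
    #configure(flavor='vanilla')       # TypeError: 'flavor' is not a valid configure() keyword

    items = ['a', 'b', 'c']
    removeFromList(items, 'b', 'zzz')  # the missing 'zzz' is silently ignored
    assert items == ['a', 'c']

    # creates pkg/ and pkg/sub/, each with an empty __init__.py
    mkdirsWithPyInitFiles('pkg/sub')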
diff --git a/cheetah/Utils/VerifyType.py b/cheetah/Utils/VerifyType.py
new file mode 100644
index 0000000..11a435d
--- /dev/null
+++ b/cheetah/Utils/VerifyType.py
@@ -0,0 +1,83 @@
+# $Id: VerifyType.py,v 1.4 2005/11/02 22:26:08 tavis_rudd Exp $
+"""Functions to verify an argument's type
+
+Meta-Data
+================================================================================
+Author: Mike Orr <iron@mso.oz.net>
+License: This software is released for unlimited distribution under the
+ terms of the MIT license. See the LICENSE file.
+Version: $Revision: 1.4 $
+Start Date: 2001/11/07
+Last Revision Date: $Date: 2005/11/02 22:26:08 $
+"""
+__author__ = "Mike Orr <iron@mso.oz.net>"
+__revision__ = "$Revision: 1.4 $"[11:-2]
+
+##################################################
+## DEPENDENCIES
+
+import types # Used in VerifyTypeClass.
+
+##################################################
+## PRIVATE FUNCTIONS
+
+def _errmsg(argname, ltd, errmsgExtra=''):
+ """Construct an error message.
+
+ argname, string, the argument name.
+ ltd, string, description of the legal types.
+    errmsgExtra, string, text to append to the error message.
+ Returns: string, the error message.
+ """
+ if errmsgExtra:
+ errmsgExtra = '\n' + errmsgExtra
+ return "arg '%s' must be %s%s" % (argname, ltd, errmsgExtra)
+
+
+##################################################
+## TYPE VERIFICATION FUNCTIONS
+
+def VerifyType(arg, argname, legalTypes, ltd, errmsgExtra=''):
+ """Verify the type of an argument.
+
+ arg, any, the argument.
+ argname, string, name of the argument.
+ legalTypes, list of type objects, the allowed types.
+ ltd, string, description of legal types (for error message).
+ errmsgExtra, string, text to append to error message.
+    Returns: True if the type check passes.
+ Exceptions: TypeError if 'arg' is the wrong type.
+ """
+ if type(arg) not in legalTypes:
+ m = _errmsg(argname, ltd, errmsgExtra)
+ raise TypeError(m)
+ return True
+
+
+def VerifyTypeClass(arg, argname, legalTypes, ltd, klass, errmsgExtra=''):
+ """Same, but if it's a class, verify it's a subclass of the right class.
+
+ arg, any, the argument.
+ argname, string, name of the argument.
+ legalTypes, list of type objects, the allowed types.
+ ltd, string, description of legal types (for error message).
+ klass, class, the parent class.
+ errmsgExtra, string, text to append to the error message.
+    Returns: True if the type check passes.
+ Exceptions: TypeError if 'arg' is the wrong type.
+ """
+ VerifyType(arg, argname, legalTypes, ltd, errmsgExtra)
+ # If no exception, the arg is a legal type.
+ if type(arg) == types.ClassType and not issubclass(arg, klass):
+ # Must test for "is class type" to avoid TypeError from issubclass().
+ m = _errmsg(argname, ltd, errmsgExtra)
+ raise TypeError(m)
+ return True
+
+# @@MO: Commented until we determine whether it's useful.
+#def VerifyClass(arg, argname, klass, ltd):
+# """Same, but allow *only* a subclass of the right class.
+# """
+# VerifyTypeClass(arg, argname, [types.ClassType], ltd, klass)
+
+# vim: shiftwidth=4 tabstop=4 expandtab
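A hedged example of the two checks above, assuming the module is importable as Cheetah.Utils.VerifyType (Python 2 only, since the class check relies on old-style classes via types.ClassType):

    import types
    from Cheetah.Utils.VerifyType import VerifyType, VerifyTypeClass

    def setName(name):
        VerifyType(name, 'name', [types.StringType, types.UnicodeType],
                   'string or unicode', 'passed to setName()')
        return name

    setName('cheetah')   # passes
    #setName(42)         # TypeError: arg 'name' must be string or unicode ...

    class Plugin: pass             # old-style base class
    class GoodPlugin(Plugin): pass

    # verifies GoodPlugin is a class *and* a subclass of Plugin
    VerifyTypeClass(GoodPlugin, 'plugin', [types.ClassType], 'a Plugin subclass', Plugin)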
diff --git a/cheetah/Utils/WebInputMixin.py b/cheetah/Utils/WebInputMixin.py
new file mode 100644
index 0000000..52b6220
--- /dev/null
+++ b/cheetah/Utils/WebInputMixin.py
@@ -0,0 +1,102 @@
+# $Id: WebInputMixin.py,v 1.10 2006/01/06 21:56:54 tavis_rudd Exp $
+"""Provides helpers for Template.webInput(), a method for importing web
+transaction variables in bulk. See the docstring of webInput for full details.
+
+Meta-Data
+================================================================================
+Author: Mike Orr <iron@mso.oz.net>
+License: This software is released for unlimited distribution under the
+ terms of the MIT license. See the LICENSE file.
+Version: $Revision: 1.10 $
+Start Date: 2002/03/17
+Last Revision Date: $Date: 2006/01/06 21:56:54 $
+"""
+__author__ = "Mike Orr <iron@mso.oz.net>"
+__revision__ = "$Revision: 1.10 $"[11:-2]
+
+from Cheetah.Utils.Misc import useOrRaise
+
+class NonNumericInputError(ValueError): pass
+
+##################################################
+## PRIVATE FUNCTIONS AND CLASSES
+
+class _Converter:
+ """A container object for info about type converters.
+ .name, string, name of this converter (for error messages).
+ .func, function, factory function.
+ .default, value to use or raise if the real value is missing.
+ .error, value to use or raise if .func() raises an exception.
+ """
+ def __init__(self, name, func, default, error):
+ self.name = name
+ self.func = func
+ self.default = default
+ self.error = error
+
+
+def _lookup(name, func, multi, converters):
+ """Look up a Webware field/cookie/value/session value. Return
+ '(realName, value)' where 'realName' is like 'name' but with any
+ conversion suffix strips off. Applies numeric conversion and
+    conversion suffix stripped off. Applies numeric conversion and
+    single-vs-multi value handling according to the comments in the source.
+ # Step 1 -- split off the conversion suffix from 'name'; e.g. "height:int".
+ # If there's no colon, the suffix is "". 'longName' is the name with the
+ # suffix, 'shortName' is without.
+ # XXX This implementation assumes "height:" means "height".
+ colon = name.find(':')
+ if colon != -1:
+ longName = name
+ shortName, ext = name[:colon], name[colon+1:]
+ else:
+ longName = shortName = name
+ ext = ''
+
+ # Step 2 -- look up the values by calling 'func'.
+ if longName != shortName:
+ values = func(longName, None) or func(shortName, None)
+ else:
+ values = func(shortName, None)
+ # 'values' is a list of strings, a string or None.
+
+ # Step 3 -- Coerce 'values' to a list of zero, one or more strings.
+ if values is None:
+ values = []
+ elif isinstance(values, str):
+ values = [values]
+
+ # Step 4 -- Find a _Converter object or raise TypeError.
+ try:
+ converter = converters[ext]
+ except KeyError:
+ fmt = "'%s' is not a valid converter name in '%s'"
+ tup = (ext, longName)
+ raise TypeError(fmt % tup)
+
+ # Step 5 -- if there's a converter func, run it on each element.
+ # If the converter raises an exception, use or raise 'converter.error'.
+ if converter.func is not None:
+ tmp = values[:]
+ values = []
+ for elm in tmp:
+ try:
+ elm = converter.func(elm)
+ except (TypeError, ValueError):
+ tup = converter.name, elm
+ errmsg = "%s '%s' contains invalid characters" % tup
+ elm = useOrRaise(converter.error, errmsg)
+ values.append(elm)
+ # 'values' is now a list of strings, ints or floats.
+
+ # Step 6 -- If we're supposed to return a multi value, return the list
+ # as is. If we're supposed to return a single value and the list is
+ # empty, return or raise 'converter.default'. Otherwise, return the
+ # first element in the list and ignore any additional values.
+ if multi:
+ return shortName, values
+ if len(values) == 0:
+ return shortName, useOrRaise(converter.default)
+ return shortName, values[0]
+
+# vim: sw=4 ts=4 expandtab
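A sketch of how Template.webInput() drives _lookup(), with a plain dict standing in for Webware's field-lookup function; the converter table below mirrors what the _Converter docstring describes, but its exact contents are an assumption:

    from Cheetah.Utils.WebInputMixin import _Converter, _lookup, NonNumericInputError

    converters = {
        '':      _Converter('string', None, '', ''),
        'int':   _Converter('int', int, 0, NonNumericInputError),
        'float': _Converter('float', float, 0.0, NonNumericInputError),
    }

    fields = {'height:int': '72', 'tags': ['a', 'b']}
    func = fields.get   # same (name, default) call signature as Webware's field lookup

    assert _lookup('height:int', func, False, converters) == ('height', 72)
    assert _lookup('tags', func, True, converters) == ('tags', ['a', 'b'])
    # a missing single value falls back to (or raises) converter.default
    assert _lookup('missing:int', func, False, converters) == ('missing', 0)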
diff --git a/cheetah/Utils/__init__.py b/cheetah/Utils/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/cheetah/Utils/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/cheetah/Utils/htmlDecode.py b/cheetah/Utils/htmlDecode.py
new file mode 100644
index 0000000..2832a74
--- /dev/null
+++ b/cheetah/Utils/htmlDecode.py
@@ -0,0 +1,14 @@
+"""This is a copy of the htmlDecode function in Webware.
+
+@@TR: It could be implemented more efficiently.
+
+"""
+
+from Cheetah.Utils.htmlEncode import htmlCodesReversed
+
+def htmlDecode(s, codes=htmlCodesReversed):
+ """ Returns the ASCII decoded version of the given HTML string. This does
+ NOT remove normal HTML tags like <p>. It is the inverse of htmlEncode()."""
+ for code in codes:
+ s = s.replace(code[1], code[0])
+ return s
diff --git a/cheetah/Utils/htmlEncode.py b/cheetah/Utils/htmlEncode.py
new file mode 100644
index 0000000..f76c77e
--- /dev/null
+++ b/cheetah/Utils/htmlEncode.py
@@ -0,0 +1,21 @@
+"""This is a copy of the htmlEncode function in Webware.
+
+
+@@TR: It could be implemented more efficiently.
+
+"""
+htmlCodes = [
+ ['&', '&amp;'],
+ ['<', '&lt;'],
+ ['>', '&gt;'],
+ ['"', '&quot;'],
+]
+htmlCodesReversed = htmlCodes[:]
+htmlCodesReversed.reverse()
+
+def htmlEncode(s, codes=htmlCodes):
+ """ Returns the HTML encoded version of the given string. This is useful to
+ display a plain ASCII text string on a web page."""
+ for code in codes:
+ s = s.replace(code[0], code[1])
+ return s
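A quick round-trip check covering both htmlEncode and htmlDecode above (module paths as used in their own imports):

    from Cheetah.Utils.htmlEncode import htmlEncode
    from Cheetah.Utils.htmlDecode import htmlDecode

    raw = '<a href="x">Tom & Jerry</a>'
    enc = htmlEncode(raw)
    assert enc == '&lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;'
    # htmlDecode applies the same table in reverse order, undoing the encoding
    assert htmlDecode(enc) == raw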
diff --git a/cheetah/Utils/memcache.py b/cheetah/Utils/memcache.py
new file mode 100644
index 0000000..ee9678d
--- /dev/null
+++ b/cheetah/Utils/memcache.py
@@ -0,0 +1,624 @@
+
+"""
+client module for memcached (memory cache daemon)
+
+Overview
+========
+
+See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
+
+Usage summary
+=============
+
+This should give you a feel for how this module operates::
+
+ import memcache
+ mc = memcache.Client(['127.0.0.1:11211'], debug=0)
+
+ mc.set("some_key", "Some value")
+ value = mc.get("some_key")
+
+ mc.set("another_key", 3)
+ mc.delete("another_key")
+
+ mc.set("key", "1") # note that the key used for incr/decr must be a string.
+ mc.incr("key")
+ mc.decr("key")
+
+The standard way to use memcache with a database is like this::
+
+ key = derive_key(obj)
+ obj = mc.get(key)
+ if not obj:
+ obj = backend_api.get(...)
+ mc.set(key, obj)
+
+ # we now have obj, and future passes through this code
+ # will use the object from the cache.
+
+Detailed Documentation
+======================
+
+More detailed documentation is available in the L{Client} class.
+"""
+
+import sys
+import socket
+import time
+import types
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+__author__ = "Evan Martin <martine@danga.com>"
+__version__ = "1.2_tummy5"
+__copyright__ = "Copyright (C) 2003 Danga Interactive"
+__license__ = "Python"
+
+class _Error(Exception):
+ pass
+
+class Client:
+ """
+ Object representing a pool of memcache servers.
+
+ See L{memcache} for an overview.
+
+ In all cases where a key is used, the key can be either:
+ 1. A simple hashable type (string, integer, etc.).
+ 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
+ making this module calculate a hash value. You may prefer, for
+ example, to keep all of a given user's objects on the same memcache
+ server, so you could use the user's unique id as the hash value.
+
+ @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
+ @group Insertion: set, add, replace
+ @group Retrieval: get, get_multi
+ @group Integers: incr, decr
+ @group Removal: delete
+ @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
+ set, add, replace, get, get_multi, incr, decr, delete
+ """
+
+ _usePickle = False
+ _FLAG_PICKLE = 1<<0
+ _FLAG_INTEGER = 1<<1
+ _FLAG_LONG = 1<<2
+
+ _SERVER_RETRIES = 10 # how many times to try finding a free server.
+
+ def __init__(self, servers, debug=0):
+ """
+ Create a new Client object with the given list of servers.
+
+ @param servers: C{servers} is passed to L{set_servers}.
+ @param debug: whether to display error messages when a server can't be
+ contacted.
+ """
+ self.set_servers(servers)
+ self.debug = debug
+ self.stats = {}
+
+ def set_servers(self, servers):
+ """
+ Set the pool of servers used by this client.
+
+ @param servers: an array of servers.
+ Servers can be passed in two forms:
+ 1. Strings of the form C{"host:port"}, which implies a default weight of 1.
+ 2. Tuples of the form C{("host:port", weight)}, where C{weight} is
+ an integer weight value.
+ """
+ self.servers = [_Host(s, self.debuglog) for s in servers]
+ self._init_buckets()
+
+ def get_stats(self):
+ '''Get statistics from each of the servers.
+
+ @return: A list of tuples ( server_identifier, stats_dictionary ).
+ The dictionary contains a number of name/value pairs specifying
+ the name of the status field and the string value associated with
+ it. The values are not converted from strings.
+ '''
+ data = []
+ for s in self.servers:
+ if not s.connect(): continue
+ name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
+ s.send_cmd('stats')
+ serverData = {}
+ data.append(( name, serverData ))
+ readline = s.readline
+ while 1:
+ line = readline()
+ if not line or line.strip() == 'END': break
+ stats = line.split(' ', 2)
+ serverData[stats[1]] = stats[2]
+
+ return(data)
+
+ def flush_all(self):
+ 'Expire all data currently in the memcache servers.'
+ for s in self.servers:
+ if not s.connect(): continue
+ s.send_cmd('flush_all')
+ s.expect("OK")
+
+ def debuglog(self, str):
+ if self.debug:
+ sys.stderr.write("MemCached: %s\n" % str)
+
+ def _statlog(self, func):
+ if not self.stats.has_key(func):
+ self.stats[func] = 1
+ else:
+ self.stats[func] += 1
+
+ def forget_dead_hosts(self):
+ """
+ Reset every host in the pool to an "alive" state.
+ """
+ for s in self.servers:
+ s.dead_until = 0
+
+ def _init_buckets(self):
+ self.buckets = []
+ for server in self.servers:
+ for i in range(server.weight):
+ self.buckets.append(server)
+
+ def _get_server(self, key):
+ if type(key) == types.TupleType:
+ serverhash = key[0]
+ key = key[1]
+ else:
+ serverhash = hash(key)
+
+ for i in range(Client._SERVER_RETRIES):
+ server = self.buckets[serverhash % len(self.buckets)]
+ if server.connect():
+ #print "(using server %s)" % server,
+ return server, key
+ serverhash = hash(str(serverhash) + str(i))
+ return None, None
+
+ def disconnect_all(self):
+ for s in self.servers:
+ s.close_socket()
+
+ def delete(self, key, time=0):
+ '''Deletes a key from the memcache.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ server, key = self._get_server(key)
+ if not server:
+ return 0
+ self._statlog('delete')
+ if time != None:
+ cmd = "delete %s %d" % (key, time)
+ else:
+ cmd = "delete %s" % key
+
+ try:
+ server.send_cmd(cmd)
+ server.expect("DELETED")
+ except socket.error, msg:
+ server.mark_dead(msg[1])
+ return 0
+ return 1
+
+ def incr(self, key, delta=1):
+ """
+ Sends a command to the server to atomically increment the value for C{key} by
+ C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't
+ exist on server, otherwise it returns the new value after incrementing.
+
+ Note that the value for C{key} must already exist in the memcache, and it
+ must be the string representation of an integer.
+
+ >>> mc.set("counter", "20") # returns 1, indicating success
+ 1
+ >>> mc.incr("counter")
+ 21
+ >>> mc.incr("counter")
+ 22
+
+ Overflow on server is not checked. Be aware of values approaching
+ 2**32. See L{decr}.
+
+ @param delta: Integer amount to increment by (should be zero or greater).
+ @return: New value after incrementing.
+ @rtype: int
+ """
+ return self._incrdecr("incr", key, delta)
+
+ def decr(self, key, delta=1):
+ """
+ Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
+ new values are capped at 0. If server value is 1, a decrement of 2
+ returns 0, not -1.
+
+ @param delta: Integer amount to decrement by (should be zero or greater).
+ @return: New value after decrementing.
+ @rtype: int
+ """
+ return self._incrdecr("decr", key, delta)
+
+ def _incrdecr(self, cmd, key, delta):
+ server, key = self._get_server(key)
+ if not server:
+ return 0
+ self._statlog(cmd)
+ cmd = "%s %s %d" % (cmd, key, delta)
+ try:
+ server.send_cmd(cmd)
+ line = server.readline()
+ return int(line)
+ except socket.error, msg:
+ server.mark_dead(msg[1])
+ return None
+
+ def add(self, key, val, time=0):
+ '''
+ Add new key with value.
+
+ Like L{set}, but only stores in memcache if the key doesn\'t already exist.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ return self._set("add", key, val, time)
+ def replace(self, key, val, time=0):
+ '''Replace existing key with value.
+
+ Like L{set}, but only stores in memcache if the key already exists.
+ The opposite of L{add}.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ return self._set("replace", key, val, time)
+ def set(self, key, val, time=0):
+ '''Unconditionally sets a key to a given value in the memcache.
+
+ The C{key} can optionally be an tuple, with the first element being the
+ hash value, if you want to avoid making this module calculate a hash value.
+ You may prefer, for example, to keep all of a given user's objects on the
+ same memcache server, so you could use the user's unique id as the hash
+ value.
+
+ @return: Nonzero on success.
+ @rtype: int
+ '''
+ return self._set("set", key, val, time)
+
+ def _set(self, cmd, key, val, time):
+ server, key = self._get_server(key)
+ if not server:
+ return 0
+
+ self._statlog(cmd)
+
+ flags = 0
+ if isinstance(val, types.StringTypes):
+ pass
+ elif isinstance(val, int):
+ flags |= Client._FLAG_INTEGER
+ val = "%d" % val
+ elif isinstance(val, long):
+ flags |= Client._FLAG_LONG
+ val = "%d" % val
+ elif self._usePickle:
+ flags |= Client._FLAG_PICKLE
+ val = pickle.dumps(val, 2)
+ else:
+ pass
+
+ fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, flags, time, len(val), val)
+ try:
+ server.send_cmd(fullcmd)
+ server.expect("STORED")
+ except socket.error, msg:
+ server.mark_dead(msg[1])
+ return 0
+ return 1
+
+ def get(self, key):
+ '''Retrieves a key from the memcache.
+
+ @return: The value or None.
+ '''
+ server, key = self._get_server(key)
+ if not server:
+ return None
+
+ self._statlog('get')
+
+ try:
+ server.send_cmd("get %s" % key)
+ rkey, flags, rlen, = self._expectvalue(server)
+ if not rkey:
+ return None
+ value = self._recv_value(server, flags, rlen)
+ server.expect("END")
+ except (_Error, socket.error), msg:
+ if type(msg) is types.TupleType:
+ msg = msg[1]
+ server.mark_dead(msg)
+ return None
+ return value
+
+ def get_multi(self, keys):
+ '''
+ Retrieves multiple keys from the memcache doing just one query.
+
+ >>> success = mc.set("foo", "bar")
+ >>> success = mc.set("baz", 42)
+ >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
+ 1
+
+ This method is recommended over regular L{get} as it lowers the number of
+ total packets flying around your network, reducing total latency, since
+ your app doesn\'t have to wait for each round-trip of L{get} before sending
+ the next one.
+
+ @param keys: An array of keys.
+ @return: A dictionary of key/value pairs that were available.
+
+ '''
+
+ self._statlog('get_multi')
+
+ server_keys = {}
+
+ # build up a list for each server of all the keys we want.
+ for key in keys:
+ server, key = self._get_server(key)
+ if not server:
+ continue
+ if not server_keys.has_key(server):
+ server_keys[server] = []
+ server_keys[server].append(key)
+
+ # send out all requests on each server before reading anything
+ dead_servers = []
+ for server in server_keys.keys():
+ try:
+ server.send_cmd("get %s" % " ".join(server_keys[server]))
+ except socket.error, msg:
+ server.mark_dead(msg[1])
+ dead_servers.append(server)
+
+ # if any servers died on the way, don't expect them to respond.
+ for server in dead_servers:
+ del server_keys[server]
+
+ retvals = {}
+ for server in server_keys.keys():
+ try:
+ line = server.readline()
+ while line and line != 'END':
+ rkey, flags, rlen = self._expectvalue(server, line)
+ # Bo Yang reports that this can sometimes be None
+ if rkey is not None:
+ val = self._recv_value(server, flags, rlen)
+ retvals[rkey] = val
+ line = server.readline()
+ except (_Error, socket.error), msg:
+ server.mark_dead(msg)
+ return retvals
+
+ def _expectvalue(self, server, line=None):
+ if not line:
+ line = server.readline()
+
+ if line[:5] == 'VALUE':
+ resp, rkey, flags, len = line.split()
+ flags = int(flags)
+ rlen = int(len)
+ return (rkey, flags, rlen)
+ else:
+ return (None, None, None)
+
+ def _recv_value(self, server, flags, rlen):
+ rlen += 2 # include \r\n
+ buf = server.recv(rlen)
+ if len(buf) != rlen:
+ raise _Error("received %d bytes when expecting %d" % (len(buf), rlen))
+
+ if len(buf) == rlen:
+ buf = buf[:-2] # strip \r\n
+
+ if flags == 0:
+ val = buf
+ elif flags & Client._FLAG_INTEGER:
+ val = int(buf)
+ elif flags & Client._FLAG_LONG:
+ val = long(buf)
+ elif self._usePickle and flags & Client._FLAG_PICKLE:
+ try:
+ val = pickle.loads(buf)
+ except:
+ self.debuglog('Pickle error...\n')
+ val = None
+        else:
+            self.debuglog("unknown flags on get: %x\n" % flags)
+            val = None
+        return val
+
+class _Host:
+ _DEAD_RETRY = 30 # number of seconds before retrying a dead server.
+
+ def __init__(self, host, debugfunc=None):
+        if isinstance(host, types.TupleType):
+            # unpack ("host:port", weight); read the weight before replacing host
+            host, self.weight = host[0], host[1]
+ else:
+ self.weight = 1
+
+ if host.find(":") > 0:
+ self.ip, self.port = host.split(":")
+ self.port = int(self.port)
+ else:
+ self.ip, self.port = host, 11211
+
+ if not debugfunc:
+ debugfunc = lambda x: x
+ self.debuglog = debugfunc
+
+ self.deaduntil = 0
+ self.socket = None
+
+ def _check_dead(self):
+ if self.deaduntil and self.deaduntil > time.time():
+ return 1
+ self.deaduntil = 0
+ return 0
+
+ def connect(self):
+ if self._get_socket():
+ return 1
+ return 0
+
+ def mark_dead(self, reason):
+ self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
+ self.deaduntil = time.time() + _Host._DEAD_RETRY
+ self.close_socket()
+
+ def _get_socket(self):
+ if self._check_dead():
+ return None
+ if self.socket:
+ return self.socket
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ # Python 2.3-ism: s.settimeout(1)
+ try:
+ s.connect((self.ip, self.port))
+ except socket.error, msg:
+ self.mark_dead("connect: %s" % msg[1])
+ return None
+ self.socket = s
+ return s
+
+ def close_socket(self):
+ if self.socket:
+ self.socket.close()
+ self.socket = None
+
+ def send_cmd(self, cmd):
+ if len(cmd) > 100:
+ self.socket.sendall(cmd)
+ self.socket.sendall('\r\n')
+ else:
+ self.socket.sendall(cmd + '\r\n')
+
+ def readline(self):
+ buffers = ''
+ recv = self.socket.recv
+ while 1:
+ data = recv(1)
+ if not data:
+ self.mark_dead('Connection closed while reading from %s'
+ % repr(self))
+ break
+ if data == '\n' and buffers and buffers[-1] == '\r':
+ return(buffers[:-1])
+ buffers = buffers + data
+ return(buffers)
+
+ def expect(self, text):
+ line = self.readline()
+ if line != text:
+ self.debuglog("while expecting '%s', got unexpected response '%s'" % (text, line))
+ return line
+
+ def recv(self, rlen):
+ buf = ''
+ recv = self.socket.recv
+ while len(buf) < rlen:
+ buf = buf + recv(rlen - len(buf))
+ return buf
+
+ def __str__(self):
+ d = ''
+ if self.deaduntil:
+ d = " (dead until %d)" % self.deaduntil
+ return "%s:%d%s" % (self.ip, self.port, d)
+
+def _doctest():
+ import doctest, memcache
+ servers = ["127.0.0.1:11211"]
+ mc = Client(servers, debug=1)
+ globs = {"mc": mc}
+ return doctest.testmod(memcache, globs=globs)
+
+if __name__ == "__main__":
+ print "Testing docstrings..."
+ _doctest()
+ print "Running tests:"
+ print
+ #servers = ["127.0.0.1:11211", "127.0.0.1:11212"]
+ servers = ["127.0.0.1:11211"]
+ mc = Client(servers, debug=1)
+
+ def to_s(val):
+ if not isinstance(val, types.StringTypes):
+ return "%s (%s)" % (val, type(val))
+ return "%s" % val
+ def test_setget(key, val):
+ print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
+ mc.set(key, val)
+ newval = mc.get(key)
+ if newval == val:
+ print "OK"
+ return 1
+ else:
+ print "FAIL"
+ return 0
+
+ class FooStruct:
+ def __init__(self):
+ self.bar = "baz"
+ def __str__(self):
+ return "A FooStruct"
+ def __eq__(self, other):
+ if isinstance(other, FooStruct):
+ return self.bar == other.bar
+ return 0
+
+ test_setget("a_string", "some random string")
+ test_setget("an_integer", 42)
+ if test_setget("long", long(1<<30)):
+ print "Testing delete ...",
+ if mc.delete("long"):
+ print "OK"
+ else:
+ print "FAIL"
+ print "Testing get_multi ...",
+ print mc.get_multi(["a_string", "an_integer"])
+
+ print "Testing get(unknown value) ...",
+ print to_s(mc.get("unknown_value"))
+
+ f = FooStruct()
+ test_setget("foostruct", f)
+
+ print "Testing incr ...",
+ x = mc.incr("an_integer", 1)
+ if x == 43:
+ print "OK"
+ else:
+ print "FAIL"
+
+ print "Testing decr ...",
+ x = mc.decr("an_integer", 1)
+ if x == 42:
+ print "OK"
+ else:
+ print "FAIL"
+
+
+
+# vim: ts=4 sw=4 et :
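A small sketch of the two Client features called out in the docstrings above, weighted servers and pre-hashed (hashvalue, key) tuple keys; the addresses are placeholders and a running memcached is assumed:

    from Cheetah.Utils import memcache

    mc = memcache.Client([('127.0.0.1:11211', 3),   # weight 3: three hash buckets
                          '127.0.0.1:11212'],       # bare string: weight 1
                         debug=1)

    # Keep all of one user's objects on the same server by supplying
    # the hash value explicitly instead of letting the module hash the key.
    user_id = 42
    mc.set((user_id, 'profile:42'), 'serialized profile', time=300)
    profile = mc.get((user_id, 'profile:42'))

    # incr/decr only operate on string representations of integers.
    mc.set('hits', '0')
    mc.incr('hits')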
diff --git a/cheetah/Utils/statprof.py b/cheetah/Utils/statprof.py
new file mode 100644
index 0000000..55638eb
--- /dev/null
+++ b/cheetah/Utils/statprof.py
@@ -0,0 +1,304 @@
+## statprof.py
+## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
+## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
+
+## This library is free software; you can redistribute it and/or
+## modify it under the terms of the GNU Lesser General Public
+## License as published by the Free Software Foundation; either
+## version 2.1 of the License, or (at your option) any later version.
+##
+## This library is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## Lesser General Public License for more details.
+##
+## You should have received a copy of the GNU Lesser General Public
+## License along with this program; if not, contact:
+##
+## Free Software Foundation Voice: +1-617-542-5942
+## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
+## Boston, MA 02111-1307, USA gnu@gnu.org
+
+"""
+statprof is intended to be a fairly simple statistical profiler for
+python. It was ported directly from a statistical profiler for guile,
+also named statprof, available from guile-lib [0].
+
+[0] http://wingolog.org/software/guile-lib/statprof/
+
+To start profiling, call statprof.start():
+>>> start()
+
+Then run whatever it is that you want to profile, for example:
+>>> import test.pystone; test.pystone.pystones()
+
+Then stop the profiling and print out the results:
+>>> stop()
+>>> display()
+ % cumulative self
+ time seconds seconds name
+ 26.72 1.40 0.37 pystone.py:79:Proc0
+ 13.79 0.56 0.19 pystone.py:133:Proc1
+ 13.79 0.19 0.19 pystone.py:208:Proc8
+ 10.34 0.16 0.14 pystone.py:229:Func2
+ 6.90 0.10 0.10 pystone.py:45:__init__
+ 4.31 0.16 0.06 pystone.py:53:copy
+ ...
+
+All of the numerical data with the exception of the calls column is
+statistically approximate. In the following column descriptions, and
+in all of statprof, "time" refers to execution time (both user and
+system), not wall clock time.
+
+% time
+ The percent of the time spent inside the procedure itself (not
+ counting children).
+
+cumulative seconds
+ The total number of seconds spent in the procedure, including
+ children.
+
+self seconds
+ The total number of seconds spent in the procedure itself (not
+ counting children).
+
+name
+ The name of the procedure.
+
+By default statprof keeps the data collected from previous runs. If you
+want to clear the collected data, call reset():
+>>> reset()
+
+reset() can also be used to change the sampling frequency. For example,
+to tell statprof to sample 50 times a second:
+>>> reset(50)
+
+This means that statprof will sample the call stack after every 1/50 of
+a second of user + system time spent running on behalf of the python
+process. When your process is idle (for example, blocking in a read(),
+as is the case at the listener), the clock does not advance. For this
+reason statprof is currently not suitable for profiling io-bound
+operations.
+
+The profiler uses the hash of the code object itself to identify the
+procedures, so it won't confuse different procedures with the same name.
+They will show up as two different rows in the output.
+
+Right now the profiler is quite simplistic. I cannot provide
+call-graphs or other higher level information. What you see in the
+table is pretty much all there is. Patches are welcome :-)
+
+
+Threading
+---------
+
+Because signals only get delivered to the main thread in Python,
+statprof only profiles the main thread. However because the time
+reporting function uses per-process timers, the results can be
+significantly off if other threads' work patterns are not similar to the
+main thread's work patterns.
+
+
+Implementation notes
+--------------------
+
+The profiler works by setting the unix profiling signal ITIMER_PROF to
+go off after the interval you define in the call to reset(). When the
+signal fires, a sampling routine is run which looks at the current
+procedure that's executing, and then crawls up the stack, and for each
+frame encountered, increments that frame's code object's sample count.
+Note that if a procedure is encountered multiple times on a given stack,
+it is only counted once. After the sampling is complete, the profiler
+resets the profiling timer to fire again after the appropriate interval.
+
+Meanwhile, the profiler keeps track, via os.times(), of how much CPU time
+(system and user -- which is also what ITIMER_PROF tracks) has elapsed
+while code has been executing within a start()/stop() block.
+
+The profiler also tries to avoid counting or timing its own code as
+much as possible.
+"""
+
+
+from __future__ import division
+
+try:
+ import itimer
+except ImportError:
+ raise ImportError('''statprof requires the itimer python extension.
+To install it, enter the following commands from a terminal:
+
+wget http://www.cute.fi/~torppa/py-itimer/py-itimer.tar.gz
+tar zxvf py-itimer.tar.gz
+cd py-itimer
+sudo python setup.py install
+''')
+
+import signal
+import os
+
+
+__all__ = ['start', 'stop', 'reset', 'display']
+
+
+###########################################################################
+## Utils
+
+def clock():
+ times = os.times()
+ return times[0] + times[1]
+
+
+###########################################################################
+## Collection data structures
+
+class ProfileState(object):
+ def __init__(self, frequency=None):
+ self.reset(frequency)
+
+ def reset(self, frequency=None):
+ # total so far
+ self.accumulated_time = 0.0
+ # start_time when timer is active
+ self.last_start_time = None
+ # total count of sampler calls
+ self.sample_count = 0
+ # a float
+ if frequency:
+ self.sample_interval = 1.0/frequency
+ elif not hasattr(self, 'sample_interval'):
+ # default to 100 Hz
+ self.sample_interval = 1.0/100.0
+ else:
+ # leave the frequency as it was
+ pass
+ self.remaining_prof_time = None
+ # for user start/stop nesting
+ self.profile_level = 0
+ # whether to catch apply-frame
+ self.count_calls = False
+ # gc time between start() and stop()
+ self.gc_time_taken = 0
+
+ def accumulate_time(self, stop_time):
+ self.accumulated_time += stop_time - self.last_start_time
+
+state = ProfileState()
+
+## call_data := { code object: CallData }
+call_data = {}
+class CallData(object):
+ def __init__(self, code):
+ self.name = code.co_name
+ self.filename = code.co_filename
+ self.lineno = code.co_firstlineno
+ self.call_count = 0
+ self.cum_sample_count = 0
+ self.self_sample_count = 0
+ call_data[code] = self
+
+def get_call_data(code):
+ return call_data.get(code, None) or CallData(code)
+
+
+###########################################################################
+## SIGPROF handler
+
+def sample_stack_procs(frame):
+ state.sample_count += 1
+ get_call_data(frame.f_code).self_sample_count += 1
+
+ code_seen = {}
+ while frame:
+ code_seen[frame.f_code] = True
+ frame = frame.f_back
+ for code in code_seen.iterkeys():
+ get_call_data(code).cum_sample_count += 1
+
+def profile_signal_handler(signum, frame):
+ if state.profile_level > 0:
+ state.accumulate_time(clock())
+ sample_stack_procs(frame)
+ itimer.setitimer(itimer.ITIMER_PROF,
+ state.sample_interval, 0.0)
+ state.last_start_time = clock()
+
+
+###########################################################################
+## Profiling API
+
+def is_active():
+ return state.profile_level > 0
+
+def start():
+ state.profile_level += 1
+ if state.profile_level == 1:
+ state.last_start_time = clock()
+ rpt = state.remaining_prof_time
+ state.remaining_prof_time = None
+ signal.signal(signal.SIGPROF, profile_signal_handler)
+ itimer.setitimer(itimer.ITIMER_PROF,
+ rpt or state.sample_interval, 0.0)
+ state.gc_time_taken = 0 # dunno
+
+def stop():
+ state.profile_level -= 1
+ if state.profile_level == 0:
+ state.accumulate_time(clock())
+ state.last_start_time = None
+ rpt = itimer.setitimer(itimer.ITIMER_PROF, 0.0, 0.0)
+ signal.signal(signal.SIGPROF, signal.SIG_IGN)
+ state.remaining_prof_time = rpt[0]
+ state.gc_time_taken = 0 # dunno
+
+def reset(frequency=None):
+ assert state.profile_level == 0, "Can't reset() while statprof is running"
+ call_data.clear()
+ state.reset(frequency)
+
+
+###########################################################################
+## Reporting API
+
+class CallStats(object):
+ def __init__(self, call_data):
+ self_samples = call_data.self_sample_count
+ cum_samples = call_data.cum_sample_count
+ nsamples = state.sample_count
+ secs_per_sample = state.accumulated_time / nsamples
+ basename = os.path.basename(call_data.filename)
+
+ self.name = '%s:%d:%s' % (basename, call_data.lineno, call_data.name)
+ self.pcnt_time_in_proc = self_samples / nsamples * 100
+ self.cum_secs_in_proc = cum_samples * secs_per_sample
+ self.self_secs_in_proc = self_samples * secs_per_sample
+ self.num_calls = None
+ self.self_secs_per_call = None
+ self.cum_secs_per_call = None
+
+ def display(self):
+ print '%6.2f %9.2f %9.2f %s' % (self.pcnt_time_in_proc,
+ self.cum_secs_in_proc,
+ self.self_secs_in_proc,
+ self.name)
+
+
+def display():
+ if state.sample_count == 0:
+ print 'No samples recorded.'
+ return
+
+ l = [CallStats(x) for x in call_data.itervalues()]
+ l = [(x.self_secs_in_proc, x.cum_secs_in_proc, x) for x in l]
+ l.sort(reverse=True)
+ l = [x[2] for x in l]
+
+ print '%5.5s %10.10s %7.7s %-8.8s' % ('% ', 'cumulative', 'self', '')
+ print '%5.5s %9.9s %8.8s %-8.8s' % ("time", "seconds", "seconds", "name")
+
+ for x in l:
+ x.display()
+
+ print '---'
+ print 'Sample count: %d' % state.sample_count
+ print 'Total time: %f seconds' % state.accumulated_time
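A compact sketch of the profiling API above, assuming the itimer extension is installed and the module is importable as Cheetah.Utils.statprof; the workload function is a stand-in:

    from Cheetah.Utils import statprof

    def workload():
        total = 0
        for i in xrange(200000):
            total += i * i
        return total

    statprof.reset(frequency=100)   # sample 100 times per second of CPU time
    statprof.start()
    try:
        workload()
    finally:
        statprof.stop()             # always pair stop() with start()
    statprof.display()              # prints %, cumulative and self seconds per procedure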