summaryrefslogtreecommitdiff
path: root/paste
diff options
context:
space:
mode:
authorNeil Williams <neil@reddit.com>2011-12-23 02:01:05 -0800
committerNeil Williams <neil@reddit.com>2011-12-23 02:01:05 -0800
commit00d96965060a62e226fa1e105cefab3dde5a78c7 (patch)
treecdbc3600409af497ea3d484f48af7cd192c6b2f7 /paste
downloadpaste-git-00d96965060a62e226fa1e105cefab3dde5a78c7.tar.gz
Add HTTP 429 "Too Many Requests"
http://www.ietf.org/id/draft-nottingham-http-new-status-03.txt
Diffstat (limited to 'paste')
-rw-r--r--paste/__init__.py17
-rw-r--r--paste/auth/__init__.py9
-rw-r--r--paste/auth/auth_tkt.py396
-rw-r--r--paste/auth/basic.py122
-rw-r--r--paste/auth/cas.py99
-rw-r--r--paste/auth/cookie.py396
-rw-r--r--paste/auth/digest.py214
-rw-r--r--paste/auth/form.py149
-rw-r--r--paste/auth/grantip.py113
-rw-r--r--paste/auth/multi.py79
-rw-r--r--paste/auth/open_id.py412
-rw-r--r--paste/cascade.py133
-rw-r--r--paste/cgiapp.py277
-rw-r--r--paste/cgitb_catcher.py116
-rw-r--r--paste/config.py120
-rw-r--r--paste/cowbell/__init__.py104
-rw-r--r--paste/cowbell/bell-ascending.pngbin0 -> 132993 bytes
-rw-r--r--paste/cowbell/bell-descending.pngbin0 -> 124917 bytes
-rw-r--r--paste/debug/__init__.py5
-rwxr-xr-xpaste/debug/debugapp.py79
-rwxr-xr-xpaste/debug/doctest_webapp.py435
-rw-r--r--paste/debug/fsdiff.py409
-rw-r--r--paste/debug/prints.py148
-rw-r--r--paste/debug/profile.py227
-rwxr-xr-xpaste/debug/testserver.py93
-rw-r--r--paste/debug/watchthreads.py347
-rw-r--r--paste/debug/wdg_validate.py121
-rw-r--r--paste/errordocument.py383
-rw-r--r--paste/evalexception/__init__.py7
-rw-r--r--paste/evalexception/evalcontext.py68
-rw-r--r--paste/evalexception/media/MochiKit.packed.js7829
-rw-r--r--paste/evalexception/media/debug.js161
-rw-r--r--paste/evalexception/media/minus.jpgbin0 -> 359 bytes
-rw-r--r--paste/evalexception/media/plus.jpgbin0 -> 361 bytes
-rw-r--r--paste/evalexception/middleware.py610
-rw-r--r--paste/exceptions/__init__.py6
-rw-r--r--paste/exceptions/collector.py526
-rw-r--r--paste/exceptions/errormiddleware.py460
-rw-r--r--paste/exceptions/formatter.py564
-rw-r--r--paste/exceptions/reporter.py141
-rw-r--r--paste/exceptions/serial_number_generator.py123
-rw-r--r--paste/fileapp.py354
-rw-r--r--paste/fixture.py1725
-rw-r--r--paste/flup_session.py108
-rw-r--r--paste/gzipper.py111
-rw-r--r--paste/httpexceptions.py666
-rw-r--r--paste/httpheaders.py1097
-rwxr-xr-xpaste/httpserver.py1410
-rw-r--r--paste/lint.py436
-rw-r--r--paste/modpython.py252
-rw-r--r--paste/pony.py57
-rwxr-xr-xpaste/progress.py222
-rw-r--r--paste/proxy.py283
-rw-r--r--paste/recursive.py405
-rw-r--r--paste/registry.py581
-rw-r--r--paste/reloader.py178
-rw-r--r--paste/request.py411
-rw-r--r--paste/response.py240
-rw-r--r--paste/session.py337
-rw-r--r--paste/transaction.py120
-rw-r--r--paste/translogger.py121
-rw-r--r--paste/url.py475
-rw-r--r--paste/urlmap.py250
-rw-r--r--paste/urlparser.py638
-rw-r--r--paste/util/PySourceColor.py2103
-rw-r--r--paste/util/UserDict24.py167
-rw-r--r--paste/util/__init__.py4
-rw-r--r--paste/util/classinit.py42
-rw-r--r--paste/util/classinstance.py38
-rw-r--r--paste/util/converters.py26
-rw-r--r--paste/util/dateinterval.py103
-rw-r--r--paste/util/datetimeutil.py361
-rw-r--r--paste/util/doctest24.py2665
-rw-r--r--paste/util/filemixin.py53
-rw-r--r--paste/util/finddata.py99
-rw-r--r--paste/util/findpackage.py26
-rw-r--r--paste/util/import_string.py95
-rw-r--r--paste/util/intset.py511
-rw-r--r--paste/util/ip4.py273
-rw-r--r--paste/util/killthread.py30
-rw-r--r--paste/util/looper.py152
-rw-r--r--paste/util/mimeparse.py160
-rw-r--r--paste/util/multidict.py397
-rw-r--r--paste/util/quoting.py98
-rw-r--r--paste/util/scgiserver.py171
-rw-r--r--paste/util/string24.py531
-rw-r--r--paste/util/subprocess24.py1152
-rw-r--r--paste/util/template.py758
-rw-r--r--paste/util/threadedprint.py250
-rw-r--r--paste/util/threadinglocal.py43
-rw-r--r--paste/wsgilib.py597
-rw-r--r--paste/wsgiwrappers.py582
92 files changed, 37462 insertions, 0 deletions
diff --git a/paste/__init__.py b/paste/__init__.py
new file mode 100644
index 0000000..ba66606
--- /dev/null
+++ b/paste/__init__.py
@@ -0,0 +1,17 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+try:
+    import pkg_resources
+    pkg_resources.declare_namespace(__name__)
+except ImportError:
+    # don't prevent use of paste if pkg_resources isn't installed
+    from pkgutil import extend_path
+    __path__ = extend_path(__path__, __name__)
+
+# Teach modulefinder (used by freezing tools such as py2exe) about the
+# extra search paths of this namespace package, when it is available.
+try:
+    import modulefinder
+except ImportError:
+    pass
+else:
+    for p in __path__:
+        modulefinder.AddPackagePath(__name__, p)
diff --git a/paste/auth/__init__.py b/paste/auth/__init__.py
new file mode 100644
index 0000000..186e2ef
--- /dev/null
+++ b/paste/auth/__init__.py
@@ -0,0 +1,9 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Package for authentication/identification of requests.
+
+The objective of this package is to provide single-focused middleware
+components that implement a particular specification. Integration of
+the components into a usable system is up to a higher-level framework.
+"""
diff --git a/paste/auth/auth_tkt.py b/paste/auth/auth_tkt.py
new file mode 100644
index 0000000..8dfce00
--- /dev/null
+++ b/paste/auth/auth_tkt.py
@@ -0,0 +1,396 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+##########################################################################
+#
+# Copyright (c) 2005 Imaginary Landscape LLC and Contributors.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+##########################################################################
+"""
+Implementation of cookie signing as done in `mod_auth_tkt
+<http://www.openfusion.com.au/labs/mod_auth_tkt/>`_.
+
+mod_auth_tkt is an Apache module that looks for these signed cookies
+and sets ``REMOTE_USER``, ``REMOTE_USER_TOKENS`` (a comma-separated
+list of groups) and ``REMOTE_USER_DATA`` (arbitrary string data).
+
+This module is an alternative to the ``paste.auth.cookie`` module;
+it's primary benefit is compatibility with mod_auth_tkt, which in turn
+makes it possible to use the same authentication process with
+non-Python code run under Apache.
+"""
+
+import time as time_mod
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+import Cookie
+from paste import request
+from urllib import quote as url_quote
+from urllib import unquote as url_unquote
+
+
class AuthTicket(object):

    """
    This class represents an authentication token.  You must pass in
    the shared secret, the userid, and the IP address.  Optionally you
    can include tokens (a list of strings, representing role names),
    'user_data', which is arbitrary data available for your own use in
    later scripts.  Lastly, you can override the cookie name and
    timestamp.

    Once you provide all the arguments, use .cookie_value() to
    generate the appropriate authentication ticket.  .cookie()
    generates a Cookie object, the str() of which is the complete
    cookie header to be sent.

    CGI usage::

        token = auth_tkt.AuthTicket('sharedsecret', 'username',
            os.environ['REMOTE_ADDR'], tokens=['admin'])
        print 'Status: 200 OK'
        print 'Content-type: text/html'
        print token.cookie()
        print
        ... redirect HTML ...

    Webware usage::

        token = auth_tkt.AuthTicket('sharedsecret', 'username',
            self.request().environ()['REMOTE_ADDR'], tokens=['admin'])
        self.response().setCookie('auth_tkt', token.cookie_value())

    Be careful not to do an HTTP redirect after login; use meta
    refresh or Javascript -- some browsers have bugs where cookies
    aren't saved when set on a redirect.
    """

    def __init__(self, secret, userid, ip, tokens=(), user_data='',
                 time=None, cookie_name='auth_tkt',
                 secure=False):
        self.secret = secret
        self.userid = userid
        self.ip = ip
        # Tokens are stored as the single comma-separated string that
        # mod_auth_tkt signs and transmits.
        self.tokens = ','.join(tokens)
        self.user_data = user_data
        if time is None:
            self.time = time_mod.time()
        else:
            self.time = time
        self.cookie_name = cookie_name
        self.secure = secure

    def digest(self):
        """Return the hex MD5 signature over this ticket's fields."""
        return calculate_digest(
            self.ip, self.time, self.secret, self.userid, self.tokens,
            self.user_data)

    def cookie_value(self):
        """Return the raw (unencoded) ticket string:
        digest, 8-hex-digit timestamp, quoted userid, tokens, user data."""
        v = '%s%08x%s!' % (self.digest(), int(self.time), url_quote(self.userid))
        if self.tokens:
            v += self.tokens + '!'
        v += self.user_data
        return v

    def cookie(self):
        """Return a ``SimpleCookie`` carrying the base64-encoded ticket
        with path ``/`` (and the 'secure' flag when requested)."""
        c = Cookie.SimpleCookie()
        # Base64 wraps long values with newlines; cookies must be one line.
        c[self.cookie_name] = self.cookie_value().encode('base64').strip().replace('\n', '')
        c[self.cookie_name]['path'] = '/'
        if self.secure:
            c[self.cookie_name]['secure'] = 'true'
        return c
+
+
class BadTicket(Exception):
    """
    Raised when a ticket string cannot be parsed or verified.

    When parsing got far enough to compute what the digest should have
    been, ``expected`` holds that value; it is ``None`` otherwise.  Not
    meant to be shown to end users, but handy for debugging.
    """

    def __init__(self, msg, expected=None):
        Exception.__init__(self, msg)
        self.expected = expected
+
+
+def parse_ticket(secret, ticket, ip):
+ """
+ Parse the ticket, returning (timestamp, userid, tokens, user_data).
+
+ If the ticket cannot be parsed, ``BadTicket`` will be raised with
+ an explanation.
+ """
+ ticket = ticket.strip('"')
+ digest = ticket[:32]
+ try:
+ timestamp = int(ticket[32:40], 16)
+ except ValueError, e:
+ raise BadTicket('Timestamp is not a hex integer: %s' % e)
+ try:
+ userid, data = ticket[40:].split('!', 1)
+ except ValueError:
+ raise BadTicket('userid is not followed by !')
+ userid = url_unquote(userid)
+ if '!' in data:
+ tokens, user_data = data.split('!', 1)
+ else:
+ # @@: Is this the right order?
+ tokens = ''
+ user_data = data
+
+ expected = calculate_digest(ip, timestamp, secret,
+ userid, tokens, user_data)
+
+ if expected != digest:
+ raise BadTicket('Digest signature is not correct',
+ expected=(expected, digest))
+
+ tokens = tokens.split(',')
+
+ return (timestamp, userid, tokens, user_data)
+
+
def calculate_digest(ip, timestamp, secret, userid, tokens, user_data):
    """Return the mod_auth_tkt double-MD5 hex digest over these fields."""
    secret, userid, tokens, user_data = [
        maybe_encode(v) for v in (secret, userid, tokens, user_data)]
    # Inner hash covers: packed ip+timestamp, secret, userid, then the
    # NUL-separated tokens and user data.
    raw = '\0'.join([
        encode_ip_timestamp(ip, timestamp) + secret + userid,
        tokens,
        user_data,
    ])
    # The outer hash re-mixes the secret, as mod_auth_tkt does.
    return md5(md5(raw).hexdigest() + secret).hexdigest()
+
+
def encode_ip_timestamp(ip, timestamp):
    """Pack dotted-quad *ip* and integer *timestamp* into the 8-char
    binary prefix that the ticket digest is computed over."""
    octets = [chr(int(part)) for part in ip.split('.')]
    t = int(timestamp)
    # Big-endian, one byte per shift; (t >> k) & 0xff == (t & (0xff << k)) >> k.
    stamp = [chr((t >> shift) & 0xff) for shift in (24, 16, 8, 0)]
    return ''.join(octets + stamp)
+
+
def maybe_encode(s, encoding='utf8'):
    """Encode *s* to a byte string when it is a unicode object;
    anything else is returned unchanged."""
    if not isinstance(s, unicode):
        return s
    return s.encode(encoding)
+
+
+class AuthTKTMiddleware(object):
+
+    """
+    Middleware that checks for signed cookies that match what
+    `mod_auth_tkt <http://www.openfusion.com.au/labs/mod_auth_tkt/>`_
+    looks for (if you have mod_auth_tkt installed, you don't need this
+    middleware, since Apache will set the environmental variables for
+    you).
+
+    Arguments:
+
+    ``secret``:
+        A secret that should be shared by any instances of this application.
+        If this app is served from more than one machine, they should all
+        have the same secret.
+
+    ``cookie_name``:
+        The name of the cookie to read and write from. Default ``auth_tkt``.
+
+    ``secure``:
+        If the cookie should be set as 'secure' (only sent over SSL) and if
+        the login must be over SSL. (Defaults to False)
+
+    ``httponly``:
+        If the cookie should be marked as HttpOnly, which means that it's
+        not accessible to JavaScript. (Defaults to False)
+
+    ``include_ip``:
+        If the cookie should include the user's IP address. If so, then
+        if they change IPs their cookie will be invalid.
+
+    ``logout_path``:
+        The path under this middleware that should signify a logout. The
+        page will be shown as usual, but the user will also be logged out
+        when they visit this page.
+
+    If used with mod_auth_tkt, then these settings (except logout_path) should
+    match the analogous Apache configuration settings.
+
+    This also adds two functions to the request:
+
+    ``environ['paste.auth_tkt.set_user'](userid, tokens='', user_data='')``
+
+        This sets a cookie that logs the user in. ``tokens`` is a
+        string (comma-separated groups) or a list of strings.
+        ``user_data`` is a string for your own use.
+
+    ``environ['paste.auth_tkt.logout_user']()``
+
+        Logs out the user.
+    """
+
+    def __init__(self, app, secret, cookie_name='auth_tkt', secure=False,
+                 include_ip=True, logout_path=None, httponly=False,
+                 no_domain_cookie=True, current_domain_cookie=True,
+                 wildcard_cookie=True):
+        self.app = app
+        self.secret = secret
+        self.cookie_name = cookie_name
+        self.secure = secure
+        self.httponly = httponly
+        self.include_ip = include_ip
+        self.logout_path = logout_path
+        # Which Set-Cookie variants set_user_cookie() emits; see that
+        # method for the three forms (host-only, current domain, wildcard).
+        self.no_domain_cookie = no_domain_cookie
+        self.current_domain_cookie = current_domain_cookie
+        self.wildcard_cookie = wildcard_cookie
+
+    def __call__(self, environ, start_response):
+        # Authenticate from the incoming ticket cookie (if any), expose
+        # set_user/logout_user callables in the environ, then append any
+        # Set-Cookie headers they queued to the outgoing response.
+        cookies = request.get_cookies(environ)
+        if self.cookie_name in cookies:
+            cookie_value = cookies[self.cookie_name].value
+        else:
+            cookie_value = ''
+        if cookie_value:
+            if self.include_ip:
+                remote_addr = environ['REMOTE_ADDR']
+            else:
+                # mod_auth_tkt uses this dummy value when IP is not
+                # checked:
+                remote_addr = '0.0.0.0'
+            # @@: This should handle bad signatures better:
+            # Also, timeouts should cause cookie refresh
+            try:
+                timestamp, userid, tokens, user_data = parse_ticket(
+                    self.secret, cookie_value, remote_addr)
+                tokens = ','.join(tokens)
+                environ['REMOTE_USER'] = userid
+                if environ.get('REMOTE_USER_TOKENS'):
+                    # We want to add tokens/roles to what's there:
+                    tokens = environ['REMOTE_USER_TOKENS'] + ',' + tokens
+                environ['REMOTE_USER_TOKENS'] = tokens
+                environ['REMOTE_USER_DATA'] = user_data
+                environ['AUTH_TYPE'] = 'cookie'
+            except BadTicket:
+                # bad credentials, just ignore without logging the user
+                # in or anything
+                pass
+        # Headers queued by set_user/logout_user during the request are
+        # flushed into the response by cookie_setting_start_response below.
+        set_cookies = []
+
+        def set_user(userid, tokens='', user_data=''):
+            set_cookies.extend(self.set_user_cookie(
+                environ, userid, tokens, user_data))
+
+        def logout_user():
+            set_cookies.extend(self.logout_user_cookie(environ))
+
+        environ['paste.auth_tkt.set_user'] = set_user
+        environ['paste.auth_tkt.logout_user'] = logout_user
+        if self.logout_path and environ.get('PATH_INFO') == self.logout_path:
+            logout_user()
+
+        def cookie_setting_start_response(status, headers, exc_info=None):
+            headers.extend(set_cookies)
+            return start_response(status, headers, exc_info)
+
+        return self.app(environ, cookie_setting_start_response)
+
+    def set_user_cookie(self, environ, userid, tokens, user_data):
+        # Build the Set-Cookie header tuples that log this user in.
+        if not isinstance(tokens, basestring):
+            tokens = ','.join(tokens)
+        if self.include_ip:
+            remote_addr = environ['REMOTE_ADDR']
+        else:
+            remote_addr = '0.0.0.0'
+        ticket = AuthTicket(
+            self.secret,
+            userid,
+            remote_addr,
+            tokens=tokens,
+            user_data=user_data,
+            cookie_name=self.cookie_name,
+            secure=self.secure)
+        # @@: Should we set REMOTE_USER etc in the current
+        # environment right now as well?
+        cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
+        wild_domain = '.' + cur_domain
+
+        cookie_options = ""
+        if self.secure:
+            cookie_options += "; secure"
+        if self.httponly:
+            cookie_options += "; HttpOnly"
+
+        # Up to three Set-Cookie variants (host-only, explicit current
+        # domain, wildcard domain), controlled by the constructor flags.
+        cookies = []
+        if self.no_domain_cookie:
+            cookies.append(('Set-Cookie', '%s=%s; Path=/%s' % (
+                self.cookie_name, ticket.cookie_value(), cookie_options)))
+        if self.current_domain_cookie:
+            cookies.append(('Set-Cookie', '%s=%s; Path=/; Domain=%s%s' % (
+                self.cookie_name, ticket.cookie_value(), cur_domain,
+                cookie_options)))
+        if self.wildcard_cookie:
+            cookies.append(('Set-Cookie', '%s=%s; Path=/; Domain=%s%s' % (
+                self.cookie_name, ticket.cookie_value(), wild_domain,
+                cookie_options)))
+
+        return cookies
+
+    def logout_user_cookie(self, environ):
+        # Clear all three cookie variants by setting an Expires date in
+        # the past, so clients drop them regardless of how they were set.
+        cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
+        wild_domain = '.' + cur_domain
+        expires = 'Sat, 01-Jan-2000 12:00:00 GMT'
+        cookies = [
+            ('Set-Cookie', '%s=""; Expires="%s"; Path=/' % (self.cookie_name, expires)),
+            ('Set-Cookie', '%s=""; Expires="%s"; Path=/; Domain=%s' %
+             (self.cookie_name, expires, cur_domain)),
+            ('Set-Cookie', '%s=""; Expires="%s"; Path=/; Domain=%s' %
+             (self.cookie_name, expires, wild_domain)),
+        ]
+        return cookies
+
+
def make_auth_tkt_middleware(
    app,
    global_conf,
    secret=None,
    cookie_name='auth_tkt',
    secure=False,
    include_ip=True,
    logout_path=None):
    """
    Paste Deploy entry point that creates the `AuthTKTMiddleware
    <class-paste.auth.auth_tkt.AuthTKTMiddleware.html>`_.

    ``secret`` is required, but can be set globally or locally.
    """
    from paste.deploy.converters import asbool
    # Config values arrive as strings; normalize the boolean options.
    secure = asbool(secure)
    include_ip = asbool(include_ip)
    if secret is None:
        # Fall back to a secret shared across the deployment.
        secret = global_conf.get('secret')
    if not secret:
        raise ValueError(
            "You must provide a 'secret' (in global or local configuration)")
    return AuthTKTMiddleware(
        app, secret, cookie_name, secure, include_ip, logout_path or None)
diff --git a/paste/auth/basic.py b/paste/auth/basic.py
new file mode 100644
index 0000000..69db128
--- /dev/null
+++ b/paste/auth/basic.py
@@ -0,0 +1,122 @@
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+Basic HTTP/1.0 Authentication
+
+This module implements ``Basic`` authentication as described in
+HTTP/1.0 specification [1]_ . Do not use this module unless you
+are using SSL or need to work with very out-dated clients, instead
+use ``digest`` authentication.
+
+>>> from paste.wsgilib import dump_environ
+>>> from paste.httpserver import serve
+>>> # from paste.auth.basic import AuthBasicHandler
+>>> realm = 'Test Realm'
+>>> def authfunc(environ, username, password):
+... return username == password
+>>> serve(AuthBasicHandler(dump_environ, realm, authfunc))
+serving on...
+
+.. [1] http://www.w3.org/Protocols/HTTP/1.0/draft-ietf-http-spec.html#BasicAA
+"""
+from paste.httpexceptions import HTTPUnauthorized
+from paste.httpheaders import *
+
class AuthBasicAuthenticator(object):
    """
    Performs ``Basic`` HTTP authentication, delegating the credential
    check to a user-supplied ``authfunc(environ, username, password)``.
    """
    type = 'basic'

    def __init__(self, realm, authfunc):
        self.realm = realm
        self.authfunc = authfunc

    def build_authentication(self):
        """Return a 401 response carrying the WWW-Authenticate challenge."""
        challenge = 'Basic realm="%s"' % self.realm
        head = WWW_AUTHENTICATE.tuples(challenge)
        return HTTPUnauthorized(headers=head)

    def authenticate(self, environ):
        """Return the username on success, otherwise a 401 challenge."""
        authorization = AUTHORIZATION(environ)
        if not authorization:
            return self.build_authentication()
        (authmeth, auth) = authorization.split(' ', 1)
        if authmeth.lower() != 'basic':
            return self.build_authentication()
        username, password = auth.strip().decode('base64').split(':', 1)
        if not self.authfunc(environ, username, password):
            return self.build_authentication()
        return username

    __call__ = authenticate
+
class AuthBasicHandler(object):
    """
    HTTP/1.0 ``Basic`` authentication middleware.

    Parameters:

    ``application``

        The application object is called only upon successful
        authentication, and can assume ``environ['REMOTE_USER']``
        is set.  If the ``REMOTE_USER`` is already set, this
        middleware is simply pass-through.

    ``realm``

        This is an identifier for the authority that is requesting
        authorization.  It is shown to the user and should be unique
        within the domain it is being used.

    ``authfunc``

        This is a mandatory user-defined function which takes a
        ``environ``, ``username`` and ``password`` for its first
        three arguments.  It should return ``True`` if the user is
        authenticated.

    """

    def __init__(self, application, realm, authfunc):
        self.application = application
        self.authenticate = AuthBasicAuthenticator(realm, authfunc)

    def __call__(self, environ, start_response):
        if REMOTE_USER(environ):
            # Someone upstream already authenticated; pass straight through.
            return self.application(environ, start_response)
        result = self.authenticate(environ)
        if not isinstance(result, str):
            # A challenge/denial response object: send it instead of the app.
            return result.wsgi_application(environ, start_response)
        AUTH_TYPE.update(environ, 'basic')
        REMOTE_USER.update(environ, result)
        return self.application(environ, start_response)
+
+middleware = AuthBasicHandler
+
+__all__ = ['AuthBasicHandler']
+
def make_basic(app, global_conf, realm, authfunc, **kw):
    """
    Grant access via basic authentication (Paste Deploy filter factory).

    Config looks like this::

        [filter:grant]
        use = egg:Paste#auth_basic
        realm=myrealm
        authfunc=somepackage.somemodule:somefunction

    """
    from paste.util.import_string import eval_import
    import types
    # The config gives a dotted-name string; resolve it to the callable.
    resolved = eval_import(authfunc)
    assert isinstance(resolved, types.FunctionType), "authfunc must resolve to a function"
    return AuthBasicHandler(app, realm, resolved)
+
+
+if "__main__" == __name__:
+ import doctest
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
diff --git a/paste/auth/cas.py b/paste/auth/cas.py
new file mode 100644
index 0000000..c3521a0
--- /dev/null
+++ b/paste/auth/cas.py
@@ -0,0 +1,99 @@
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+CAS 1.0 Authentication
+
+The Central Authentication System is a straight-forward single sign-on
+mechanism developed by Yale University's ITS department. It has since
+enjoyed widespread success and is deployed at many major universities
+and some corporations.
+
+ https://clearinghouse.ja-sig.org/wiki/display/CAS/Home
+ http://www.yale.edu/tp/auth/usingcasatyale.html
+
+This implementation has the goal of maintaining current path arguments
+passed to the system so that it can be used as middleware at any stage
+of processing. It has the secondary goal of allowing for other
+authentication methods to be used concurrently.
+"""
+import urllib
+from paste.request import construct_url
+from paste.httpexceptions import HTTPSeeOther, HTTPForbidden
+
+class CASLoginFailure(HTTPForbidden):
+    """ The 403 exception raised when the authority answers 'no' to a
+    ticket validation request """
+
+class CASAuthenticate(HTTPSeeOther):
+    """ The 303 redirect exception raised to send the user to the CAS
+    login page for authentication """
+
+def AuthCASHandler(application, authority):
+    """
+    middleware to implement CAS 1.0 authentication
+
+    There are several possible outcomes:
+
+    0. If the REMOTE_USER environment variable is already populated;
+    then this middleware is a no-op, and the request is passed along
+    to the application.
+
+    1. If a query argument 'ticket' is found, then an attempt to
+    validate said ticket with the authentication service is made. If
+    the ticket is not validated, a 403 'Forbidden' exception is raised.
+    Otherwise, the REMOTE_USER variable is set with the NetID that
+    was validated and AUTH_TYPE is set to "cas".
+
+    2. Otherwise, a 303 'See Other' is returned to the client directing
+    them to login using the CAS service. After logon, the service
+    will send them back to this same URL, only with a 'ticket' query
+    argument.
+
+    Parameters:
+
+    ``authority``
+
+        This is a fully-qualified URL to a CAS 1.0 service. The URL
+        should end with a '/' and have the 'login' and 'validate'
+        sub-paths as described in the CAS 1.0 documentation.
+
+    """
+    assert authority.endswith("/") and authority.startswith("http")
+    def cas_application(environ, start_response):
+        username = environ.get('REMOTE_USER','')
+        if username:
+            # outcome 0: already authenticated; no-op pass-through
+            return application(environ, start_response)
+        qs = environ.get('QUERY_STRING','').split("&")
+        if qs and qs[-1].startswith("ticket="):
+            # assume a response from the authority
+            ticket = qs.pop().split("=", 1)[1]
+            # strip the ticket argument so the application never sees it
+            environ['QUERY_STRING'] = "&".join(qs)
+            service = construct_url(environ)
+            args = urllib.urlencode(
+                {'service': service,'ticket': ticket})
+            requrl = authority + "validate?" + args
+            # CAS 1.0 validate replies with two lines: 'yes\n<netid>' or 'no\n'
+            result = urllib.urlopen(requrl).read().split("\n")
+            if 'yes' == result[0]:
+                environ['REMOTE_USER'] = result[1]
+                environ['AUTH_TYPE'] = 'cas'
+                return application(environ, start_response)
+            exce = CASLoginFailure()
+        else:
+            # outcome 2: redirect the client to the CAS login page,
+            # asking it to return to this same URL afterwards
+            service = construct_url(environ)
+            args = urllib.urlencode({'service': service})
+            location = authority + "login?" + args
+            exce = CASAuthenticate(location)
+        return exce.wsgi_application(environ, start_response)
+    return cas_application
+
+middleware = AuthCASHandler
+
+__all__ = ['CASLoginFailure', 'CASAuthenticate', 'AuthCASHandler' ]
+
+if '__main__' == __name__:
+ authority = "https://secure.its.yale.edu/cas/servlet/"
+ from paste.wsgilib import dump_environ
+ from paste.httpserver import serve
+ from paste.httpexceptions import *
+ serve(HTTPExceptionHandler(
+ AuthCASHandler(dump_environ, authority)))
diff --git a/paste/auth/cookie.py b/paste/auth/cookie.py
new file mode 100644
index 0000000..2601a90
--- /dev/null
+++ b/paste/auth/cookie.py
@@ -0,0 +1,396 @@
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+Cookie "Saved" Authentication
+
+This authentication middleware saves the current REMOTE_USER,
+REMOTE_SESSION, and any other environment variables specified in a
+cookie so that it can be retrieved during the next request without
+requiring re-authentication. This uses a session cookie on the client
+side (so it goes away when the user closes their window) and does
+server-side expiration.
+
+Following is a very simple example where a form is presented asking for
+a user name (no actual checking), and dummy session identifier (perhaps
+corresponding to a database session id) is stored in the cookie.
+
+::
+
+ >>> from paste.httpserver import serve
+ >>> from paste.fileapp import DataApp
+ >>> from paste.httpexceptions import *
+ >>> from paste.auth.cookie import AuthCookieHandler
+ >>> from paste.wsgilib import parse_querystring
+ >>> def testapp(environ, start_response):
+ ... user = dict(parse_querystring(environ)).get('user','')
+ ... if user:
+ ... environ['REMOTE_USER'] = user
+ ... environ['REMOTE_SESSION'] = 'a-session-id'
+ ... if environ.get('REMOTE_USER'):
+ ... page = '<html><body>Welcome %s (%s)</body></html>'
+ ... page %= (environ['REMOTE_USER'], environ['REMOTE_SESSION'])
+ ... else:
+ ... page = ('<html><body><form><input name="user" />'
+ ... '<input type="submit" /></form></body></html>')
+ ... return DataApp(page, content_type="text/html")(
+ ... environ, start_response)
+ >>> serve(AuthCookieHandler(testapp))
+ serving on...
+
+"""
+
+import hmac, base64, random, time, warnings
+try:
+ from hashlib import sha1
+except ImportError:
+ # NOTE: We have to use the callable with hashlib (hashlib.sha1),
+ # otherwise hmac only accepts the sha module object itself
+ import sha as sha1
+from paste.request import get_cookies
+
+def make_time(value):
+ return time.strftime("%Y%m%d%H%M", time.gmtime(value))
+_signature_size = len(hmac.new('x', 'x', sha1).digest())
+_header_size = _signature_size + len(make_time(time.time()))
+
+# @@: Should this be using urllib.quote?
+# build encode/decode functions to safely pack away values
+_encode = [('\\', '\\x5c'), ('"', '\\x22'),
+ ('=', '\\x3d'), (';', '\\x3b')]
+_decode = [(v, k) for (k, v) in _encode]
+_decode.reverse()
+def encode(s, sublist = _encode):
+ return reduce((lambda a, (b, c): a.replace(b, c)), sublist, str(s))
+decode = lambda s: encode(s, _decode)
+
+class CookieTooLarge(RuntimeError):
+ def __init__(self, content, cookie):
+ RuntimeError.__init__("Signed cookie exceeds maximum size of 4096")
+ self.content = content
+ self.cookie = cookie
+
+_all_chars = ''.join([chr(x) for x in range(0, 255)])
+def new_secret():
+ """ returns a 64 byte secret """
+ return ''.join(random.sample(_all_chars, 64))
+
+class AuthCookieSigner(object):
+ """
+    save/restore ``environ`` entries via digitally signed cookie
+
+ This class converts content into a timed and digitally signed
+ cookie, as well as having the facility to reverse this procedure.
+ If the cookie, after the content is encoded and signed exceeds the
+ maximum length (4096), then CookieTooLarge exception is raised.
+
+ The timeout of the cookie is handled on the server side for a few
+ reasons. First, if a 'Expires' directive is added to a cookie, then
+ the cookie becomes persistent (lasting even after the browser window
+ has closed). Second, the user's clock may be wrong (perhaps
+ intentionally). The timeout is specified in minutes; and expiration
+ date returned is rounded to one second.
+
+ Constructor Arguments:
+
+ ``secret``
+
+        This is a secret key if you want to synchronize your keys so
+ that the cookie will be good across a cluster of computers.
+ It is recommended via the HMAC specification (RFC 2104) that
+ the secret key be 64 bytes since this is the block size of
+ the hashing. If you do not provide a secret key, a random
+ one is generated each time you create the handler; this
+ should be sufficient for most cases.
+
+ ``timeout``
+
+ This is the time (in minutes) from which the cookie is set
+ to expire. Note that on each request a new (replacement)
+ cookie is sent, hence this is effectively a session timeout
+ parameter for your entire cluster. If you do not provide a
+ timeout, it is set at 30 minutes.
+
+ ``maxlen``
+
+ This is the maximum size of the *signed* cookie; hence the
+ actual content signed will be somewhat less. If the cookie
+ goes over this size, a ``CookieTooLarge`` exception is
+ raised so that unexpected handling of cookies on the client
+ side are avoided. By default this is set at 4k (4096 bytes),
+ which is the standard cookie size limit.
+
+ """
+ def __init__(self, secret = None, timeout = None, maxlen = None):
+ self.timeout = timeout or 30
+ if isinstance(timeout, basestring):
+ raise ValueError(
+ "Timeout must be a number (minutes), not a string (%r)"
+ % timeout)
+ self.maxlen = maxlen or 4096
+ self.secret = secret or new_secret()
+
+ def sign(self, content):
+ """
+ Sign the content returning a valid cookie (that does not
+ need to be escaped and quoted). The expiration of this
+ cookie is handled server-side in the auth() function.
+ """
+ cookie = base64.encodestring(
+ hmac.new(self.secret, content, sha1).digest() +
+ make_time(time.time() + 60*self.timeout) +
+ content)
+ cookie = cookie.replace("/", "_").replace("=", "~")
+ cookie = cookie.replace('\n', '').replace('\r', '')
+ if len(cookie) > self.maxlen:
+ raise CookieTooLarge(content, cookie)
+ return cookie
+
+ def auth(self, cookie):
+ """
+        Authenticate the cookie using the signature, verify that it
+ has not expired; and return the cookie's content
+ """
+ decode = base64.decodestring(
+ cookie.replace("_", "/").replace("~", "="))
+ signature = decode[:_signature_size]
+ expires = decode[_signature_size:_header_size]
+ content = decode[_header_size:]
+ if signature == hmac.new(self.secret, content, sha1).digest():
+ if int(expires) > int(make_time(time.time())):
+ return content
+ else:
+ # This is the normal case of an expired cookie; just
+ # don't bother doing anything here.
+ pass
+ else:
+ # This case can happen if the server is restarted with a
+ # different secret; or if the user's IP address changed
+ # due to a proxy. However, it could also be a break-in
+ # attempt -- so should it be reported?
+ pass
+
+class AuthCookieEnviron(list):
+ """
+ a list of environment keys to be saved via cookie
+
+ An instance of this object, found at ``environ['paste.auth.cookie']``
+ lists the `environ` keys that were restored from or will be added
+    to the digitally signed cookie.  This object can be accessed from an
+ `environ` variable by using this module's name.
+ """
+ def __init__(self, handler, scanlist):
+ list.__init__(self, scanlist)
+ self.handler = handler
+ def append(self, value):
+ if value in self:
+ return
+ list.append(self, str(value))
+
+class AuthCookieHandler(object):
+ """
+ the actual handler that should be put in your middleware stack
+
+ This middleware uses cookies to stash-away a previously authenticated
+ user (and perhaps other variables) so that re-authentication is not
+ needed. This does not implement sessions; and therefore N servers
+    can be synchronized to accept the same saved authentication if they
+ all use the same cookie_name and secret.
+
+ By default, this handler scans the `environ` for the REMOTE_USER
+ and REMOTE_SESSION key; if found, it is stored. It can be
+ configured to scan other `environ` keys as well -- but be careful
+ not to exceed 2-3k (so that the encoded and signed cookie does not
+ exceed 4k). You can ask it to handle other environment variables
+ by doing:
+
+ ``environ['paste.auth.cookie'].append('your.environ.variable')``
+
+
+ Constructor Arguments:
+
+ ``application``
+
+ This is the wrapped application which will have access to
+ the ``environ['REMOTE_USER']`` restored by this middleware.
+
+ ``cookie_name``
+
+ The name of the cookie used to store this content, by default
+ it is ``PASTE_AUTH_COOKIE``.
+
+ ``scanlist``
+
+ This is the initial set of ``environ`` keys to
+        save/restore to the signed cookie.  By default it consists
+ only of ``REMOTE_USER`` and ``REMOTE_SESSION``; any tuple
+ or list of environment keys will work. However, be
+ careful, as the total saved size is limited to around 3k.
+
+ ``signer``
+
+ This is the signer object used to create the actual cookie
+ values, by default, it is ``AuthCookieSigner`` and is passed
+ the remaining arguments to this function: ``secret``,
+ ``timeout``, and ``maxlen``.
+
+ At this time, each cookie is individually signed. To store more
+ than the 4k of data; it is possible to sub-class this object to
+ provide different ``environ_name`` and ``cookie_name``
+ """
+ environ_name = 'paste.auth.cookie'
+ cookie_name = 'PASTE_AUTH_COOKIE'
+ signer_class = AuthCookieSigner
+ environ_class = AuthCookieEnviron
+
+ def __init__(self, application, cookie_name=None, scanlist=None,
+ signer=None, secret=None, timeout=None, maxlen=None):
+ if not signer:
+ signer = self.signer_class(secret, timeout, maxlen)
+ self.signer = signer
+ self.scanlist = scanlist or ('REMOTE_USER','REMOTE_SESSION')
+ self.application = application
+ self.cookie_name = cookie_name or self.cookie_name
+
+ def __call__(self, environ, start_response):
+ if self.environ_name in environ:
+ raise AssertionError("AuthCookie already installed!")
+ scanlist = self.environ_class(self, self.scanlist)
+ jar = get_cookies(environ)
+ if jar.has_key(self.cookie_name):
+ content = self.signer.auth(jar[self.cookie_name].value)
+ if content:
+ for pair in content.split(";"):
+ (k, v) = pair.split("=")
+ k = decode(k)
+ if k not in scanlist:
+ scanlist.append(k)
+ if k in environ:
+ continue
+ environ[k] = decode(v)
+ if 'REMOTE_USER' == k:
+ environ['AUTH_TYPE'] = 'cookie'
+ environ[self.environ_name] = scanlist
+ if "paste.httpexceptions" in environ:
+ warnings.warn("Since paste.httpexceptions is hooked in your "
+ "processing chain before paste.auth.cookie, if an "
+ "HTTPRedirection is raised, the cookies this module sets "
+ "will not be included in your response.\n")
+
+ def response_hook(status, response_headers, exc_info=None):
+ """
+ Scan the environment for keys specified in the scanlist,
+ pack up their values, signs the content and issues a cookie.
+ """
+ scanlist = environ.get(self.environ_name)
+ assert scanlist and isinstance(scanlist, self.environ_class)
+ content = []
+ for k in scanlist:
+ v = environ.get(k)
+ if v is not None:
+ if type(v) is not str:
+ raise ValueError(
+ "The value of the environmental variable %r "
+ "is not a str (only str is allowed; got %r)"
+ % (k, v))
+ content.append("%s=%s" % (encode(k), encode(v)))
+ if content:
+ content = ";".join(content)
+ content = self.signer.sign(content)
+ cookie = '%s=%s; Path=/;' % (self.cookie_name, content)
+ if 'https' == environ['wsgi.url_scheme']:
+ cookie += ' secure;'
+ response_headers.append(('Set-Cookie', cookie))
+ return start_response(status, response_headers, exc_info)
+ return self.application(environ, response_hook)
+
+middleware = AuthCookieHandler
+
+# Paste Deploy entry point:
+def make_auth_cookie(
+ app, global_conf,
+ # Should this get picked up from global_conf somehow?:
+ cookie_name='PASTE_AUTH_COOKIE',
+ scanlist=('REMOTE_USER', 'REMOTE_SESSION'),
+ # signer cannot be set
+ secret=None,
+ timeout=30,
+ maxlen=4096):
+ """
+ This middleware uses cookies to stash-away a previously
+ authenticated user (and perhaps other variables) so that
+ re-authentication is not needed. This does not implement
+    sessions; and therefore N servers can be synchronized to accept the
+ same saved authentication if they all use the same cookie_name and
+ secret.
+
+ By default, this handler scans the `environ` for the REMOTE_USER
+ and REMOTE_SESSION key; if found, it is stored. It can be
+ configured to scan other `environ` keys as well -- but be careful
+ not to exceed 2-3k (so that the encoded and signed cookie does not
+ exceed 4k). You can ask it to handle other environment variables
+ by doing:
+
+ ``environ['paste.auth.cookie'].append('your.environ.variable')``
+
+ Configuration:
+
+ ``cookie_name``
+
+ The name of the cookie used to store this content, by
+ default it is ``PASTE_AUTH_COOKIE``.
+
+ ``scanlist``
+
+ This is the initial set of ``environ`` keys to
+        save/restore to the signed cookie.  By default it consists
+ only of ``REMOTE_USER`` and ``REMOTE_SESSION``; any
+ space-separated list of environment keys will work.
+ However, be careful, as the total saved size is limited to
+ around 3k.
+
+ ``secret``
+
+ The secret that will be used to sign the cookies. If you
+ don't provide one (and none is set globally) then a random
+ secret will be created. Each time the server is restarted
+ a new secret will then be created and all cookies will
+ become invalid! This can be any string value.
+
+ ``timeout``
+
+ The time to keep the cookie, expressed in minutes. This
+ is handled server-side, so a new cookie with a new timeout
+ is added to every response.
+
+ ``maxlen``
+
+ The maximum length of the cookie that is sent (default 4k,
+ which is a typical browser maximum)
+
+ """
+ if isinstance(scanlist, basestring):
+ scanlist = scanlist.split()
+ if secret is None and global_conf.get('secret'):
+ secret = global_conf['secret']
+ try:
+ timeout = int(timeout)
+ except ValueError:
+ raise ValueError('Bad value for timeout (must be int): %r'
+ % timeout)
+ try:
+ maxlen = int(maxlen)
+ except ValueError:
+ raise ValueError('Bad value for maxlen (must be int): %r'
+ % maxlen)
+ return AuthCookieHandler(
+ app, cookie_name=cookie_name, scanlist=scanlist,
+ secret=secret, timeout=timeout, maxlen=maxlen)
+
+__all__ = ['AuthCookieHandler', 'AuthCookieSigner', 'AuthCookieEnviron']
+
+if "__main__" == __name__:
+ import doctest
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
+
diff --git a/paste/auth/digest.py b/paste/auth/digest.py
new file mode 100644
index 0000000..483c38d
--- /dev/null
+++ b/paste/auth/digest.py
@@ -0,0 +1,214 @@
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+Digest HTTP/1.1 Authentication
+
+This module implements ``Digest`` authentication as described by
+RFC 2617 [1]_ .
+
+Basically, you just put this module before your application, and it
+takes care of requesting and handling authentication requests. This
+module has been tested with several common browsers "out-in-the-wild".
+
+>>> from paste.wsgilib import dump_environ
+>>> from paste.httpserver import serve
+>>> # from paste.auth.digest import digest_password, AuthDigestHandler
+>>> realm = 'Test Realm'
+>>> def authfunc(environ, realm, username):
+... return digest_password(realm, username, username)
+>>> serve(AuthDigestHandler(dump_environ, realm, authfunc))
+serving on...
+
+This code has not been audited by a security expert, please use with
+caution (or better yet, report security holes). At this time, this
+implementation does not provide for further challenges, nor does it
+support Authentication-Info header. It also uses md5, and an option
+to use sha would be a good thing.
+
+.. [1] http://www.faqs.org/rfcs/rfc2617.html
+"""
+from paste.httpexceptions import HTTPUnauthorized
+from paste.httpheaders import *
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+import time, random
+from urllib import quote as url_quote
+
+def digest_password(realm, username, password):
+ """ construct the appropriate hashcode needed for HTTP digest """
+ return md5("%s:%s:%s" % (username, realm, password)).hexdigest()
+
+class AuthDigestAuthenticator(object):
+ """ implementation of RFC 2617 - HTTP Digest Authentication """
+ def __init__(self, realm, authfunc):
+ self.nonce = {} # list to prevent replay attacks
+ self.authfunc = authfunc
+ self.realm = realm
+
+ def build_authentication(self, stale = ''):
+ """ builds the authentication error """
+ nonce = md5(
+ "%s:%s" % (time.time(), random.random())).hexdigest()
+ opaque = md5(
+ "%s:%s" % (time.time(), random.random())).hexdigest()
+ self.nonce[nonce] = None
+ parts = {'realm': self.realm, 'qop': 'auth',
+ 'nonce': nonce, 'opaque': opaque }
+ if stale:
+ parts['stale'] = 'true'
+ head = ", ".join(['%s="%s"' % (k, v) for (k, v) in parts.items()])
+ head = [("WWW-Authenticate", 'Digest %s' % head)]
+ return HTTPUnauthorized(headers=head)
+
+ def compute(self, ha1, username, response, method,
+ path, nonce, nc, cnonce, qop):
+ """ computes the authentication, raises error if unsuccessful """
+ if not ha1:
+ return self.build_authentication()
+ ha2 = md5('%s:%s' % (method, path)).hexdigest()
+ if qop:
+ chk = "%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2)
+ else:
+ chk = "%s:%s:%s" % (ha1, nonce, ha2)
+ if response != md5(chk).hexdigest():
+ if nonce in self.nonce:
+ del self.nonce[nonce]
+ return self.build_authentication()
+ pnc = self.nonce.get(nonce,'00000000')
+ if nc <= pnc:
+ if nonce in self.nonce:
+ del self.nonce[nonce]
+ return self.build_authentication(stale = True)
+ self.nonce[nonce] = nc
+ return username
+
+ def authenticate(self, environ):
+ """ This function takes a WSGI environment and authenticates
+ the request returning authenticated user or error.
+ """
+ method = REQUEST_METHOD(environ)
+ fullpath = url_quote(SCRIPT_NAME(environ)) + url_quote(PATH_INFO(environ))
+ authorization = AUTHORIZATION(environ)
+ if not authorization:
+ return self.build_authentication()
+ (authmeth, auth) = authorization.split(" ", 1)
+ if 'digest' != authmeth.lower():
+ return self.build_authentication()
+ amap = {}
+ for itm in auth.split(","):
+ (k,v) = [s.strip() for s in itm.strip().split("=", 1)]
+ amap[k] = v.replace('"', '')
+ try:
+ username = amap['username']
+ authpath = amap['uri']
+ nonce = amap['nonce']
+ realm = amap['realm']
+ response = amap['response']
+ assert authpath.split("?", 1)[0] in fullpath
+ assert realm == self.realm
+ qop = amap.get('qop', '')
+ cnonce = amap.get('cnonce', '')
+ nc = amap.get('nc', '00000000')
+ if qop:
+ assert 'auth' == qop
+ assert nonce and nc
+ except:
+ return self.build_authentication()
+ ha1 = self.authfunc(environ, realm, username)
+ return self.compute(ha1, username, response, method, authpath,
+ nonce, nc, cnonce, qop)
+
+ __call__ = authenticate
+
+class AuthDigestHandler(object):
+ """
+ middleware for HTTP Digest authentication (RFC 2617)
+
+ This component follows the procedure below:
+
+ 0. If the REMOTE_USER environment variable is already populated;
+ then this middleware is a no-op, and the request is passed
+ along to the application.
+
+ 1. If the HTTP_AUTHORIZATION header was not provided or specifies
+       an algorithm other than ``digest``, then a HTTPUnauthorized
+ response is generated with the challenge.
+
+    2. If the response is malformed or if the user's credentials
+ do not pass muster, another HTTPUnauthorized is raised.
+
+    3. If all goes well, and the user's credentials pass; then
+ REMOTE_USER environment variable is filled in and the
+ AUTH_TYPE is listed as 'digest'.
+
+ Parameters:
+
+ ``application``
+
+ The application object is called only upon successful
+ authentication, and can assume ``environ['REMOTE_USER']``
+ is set. If the ``REMOTE_USER`` is already set, this
+ middleware is simply pass-through.
+
+ ``realm``
+
+        This is an identifier for the authority that is requesting
+ authorization. It is shown to the user and should be unique
+ within the domain it is being used.
+
+ ``authfunc``
+
+ This is a callback function which performs the actual
+ authentication; the signature of this callback is:
+
+ authfunc(environ, realm, username) -> hashcode
+
+ This module provides a 'digest_password' helper function
+ which can help construct the hashcode; it is recommended
+ that the hashcode is stored in a database, not the user's
+ actual password (since you only need the hashcode).
+ """
+ def __init__(self, application, realm, authfunc):
+ self.authenticate = AuthDigestAuthenticator(realm, authfunc)
+ self.application = application
+
+ def __call__(self, environ, start_response):
+ username = REMOTE_USER(environ)
+ if not username:
+ result = self.authenticate(environ)
+ if isinstance(result, str):
+ AUTH_TYPE.update(environ,'digest')
+ REMOTE_USER.update(environ, result)
+ else:
+ return result.wsgi_application(environ, start_response)
+ return self.application(environ, start_response)
+
+middleware = AuthDigestHandler
+
+__all__ = ['digest_password', 'AuthDigestHandler' ]
+
+def make_digest(app, global_conf, realm, authfunc, **kw):
+ """
+ Grant access via digest authentication
+
+ Config looks like this::
+
+ [filter:grant]
+ use = egg:Paste#auth_digest
+ realm=myrealm
+ authfunc=somepackage.somemodule:somefunction
+
+ """
+ from paste.util.import_string import eval_import
+ import types
+ authfunc = eval_import(authfunc)
+ assert isinstance(authfunc, types.FunctionType), "authfunc must resolve to a function"
+ return AuthDigestHandler(app, realm, authfunc)
+
+if "__main__" == __name__:
+ import doctest
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
diff --git a/paste/auth/form.py b/paste/auth/form.py
new file mode 100644
index 0000000..4e6aa49
--- /dev/null
+++ b/paste/auth/form.py
@@ -0,0 +1,149 @@
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+Authentication via HTML Form
+
+This is a very simple HTML form login screen that asks for the username
+and password. This middleware component requires that an authorization
+function taking the name and password and that it be placed in your
+application stack. This class does not include any session management
+code or way to save the user's authorization; however, it is easy enough
+to put ``paste.auth.cookie`` in your application stack.
+
+>>> from paste.wsgilib import dump_environ
+>>> from paste.httpserver import serve
+>>> from paste.auth.cookie import AuthCookieHandler
+>>> from paste.auth.form import AuthFormHandler
+>>> def authfunc(environ, username, password):
+... return username == password
+>>> serve(AuthCookieHandler(
+... AuthFormHandler(dump_environ, authfunc)))
+serving on...
+
+"""
+from paste.request import construct_url, parse_formvars
+
+TEMPLATE = """\
+<html>
+ <head><title>Please Login!</title></head>
+ <body>
+ <h1>Please Login</h1>
+ <form action="%s" method="post">
+ <dl>
+ <dt>Username:</dt>
+ <dd><input type="text" name="username"></dd>
+ <dt>Password:</dt>
+ <dd><input type="password" name="password"></dd>
+ </dl>
+ <input type="submit" name="authform" />
+ <hr />
+ </form>
+ </body>
+</html>
+"""
+
+class AuthFormHandler(object):
+ """
+ HTML-based login middleware
+
+ This causes a HTML form to be returned if ``REMOTE_USER`` is
+ not found in the ``environ``. If the form is returned, the
+ ``username`` and ``password`` combination are given to a
+ user-supplied authentication function, ``authfunc``. If this
+ is successful, then application processing continues.
+
+ Parameters:
+
+ ``application``
+
+ The application object is called only upon successful
+ authentication, and can assume ``environ['REMOTE_USER']``
+ is set. If the ``REMOTE_USER`` is already set, this
+ middleware is simply pass-through.
+
+ ``authfunc``
+
+ This is a mandatory user-defined function which takes a
+ ``environ``, ``username`` and ``password`` for its first
+ three arguments. It should return ``True`` if the user is
+ authenticated.
+
+ ``template``
+
+ This is an optional (a default is provided) HTML
+        fragment that takes exactly one ``%s`` substitution
+ argument; which *must* be used for the form's ``action``
+ to ensure that this middleware component does not alter
+ the current path. The HTML form must use ``POST`` and
+ have two input names: ``username`` and ``password``.
+
+ Since the authentication form is submitted (via ``POST``)
+ neither the ``PATH_INFO`` nor the ``QUERY_STRING`` are accessed,
+ and hence the current path remains _unaltered_ through the
+ entire authentication process. If authentication succeeds, the
+ ``REQUEST_METHOD`` is converted from a ``POST`` to a ``GET``,
+ so that a redirect is unnecessary (unlike most form auth
+ implementations)
+ """
+
+ def __init__(self, application, authfunc, template=None):
+ self.application = application
+ self.authfunc = authfunc
+ self.template = template or TEMPLATE
+
+ def __call__(self, environ, start_response):
+ username = environ.get('REMOTE_USER','')
+ if username:
+ return self.application(environ, start_response)
+
+ if 'POST' == environ['REQUEST_METHOD']:
+ formvars = parse_formvars(environ, include_get_vars=False)
+ username = formvars.get('username')
+ password = formvars.get('password')
+ if username and password:
+ if self.authfunc(environ, username, password):
+ environ['AUTH_TYPE'] = 'form'
+ environ['REMOTE_USER'] = username
+ environ['REQUEST_METHOD'] = 'GET'
+ environ['CONTENT_LENGTH'] = ''
+ environ['CONTENT_TYPE'] = ''
+ del environ['paste.parsed_formvars']
+ return self.application(environ, start_response)
+
+ content = self.template % construct_url(environ)
+ start_response("200 OK", [('Content-Type', 'text/html'),
+ ('Content-Length', str(len(content)))])
+ return [content]
+
+middleware = AuthFormHandler
+
+__all__ = ['AuthFormHandler']
+
+def make_form(app, global_conf, realm, authfunc, **kw):
+ """
+ Grant access via form authentication
+
+ Config looks like this::
+
+ [filter:grant]
+ use = egg:Paste#auth_form
+ realm=myrealm
+ authfunc=somepackage.somemodule:somefunction
+
+ """
+ from paste.util.import_string import eval_import
+ import types
+ authfunc = eval_import(authfunc)
+ assert isinstance(authfunc, types.FunctionType), "authfunc must resolve to a function"
+ template = kw.get('template')
+ if template is not None:
+ template = eval_import(template)
+ assert isinstance(template, str), "template must resolve to a string"
+
+ return AuthFormHandler(app, authfunc, template)
+
+if "__main__" == __name__:
+ import doctest
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
diff --git a/paste/auth/grantip.py b/paste/auth/grantip.py
new file mode 100644
index 0000000..94b3900
--- /dev/null
+++ b/paste/auth/grantip.py
@@ -0,0 +1,113 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Grant roles and logins based on IP address.
+"""
+from paste.util import ip4
+
+class GrantIPMiddleware(object):
+
+ """
+ On each request, ``ip_map`` is checked against ``REMOTE_ADDR``
+ and logins and roles are assigned based on that.
+
+ ``ip_map`` is a map of {ip_mask: (username, roles)}. Either
+ ``username`` or ``roles`` may be None. Roles may also be prefixed
+ with ``-``, like ``'-system'`` meaning that role should be
+ revoked. ``'__remove__'`` for a username will remove the username.
+
+ If ``clobber_username`` is true (default) then any user
+ specification will override the current value of ``REMOTE_USER``.
+ ``'__remove__'`` will always clobber the username.
+
+ ``ip_mask`` is something that `paste.util.ip4:IP4Range
+ <class-paste.util.ip4.IP4Range.html>`_ can parse. Simple IP
+ addresses, IP/mask, ip<->ip ranges, and hostnames are allowed.
+ """
+
+ def __init__(self, app, ip_map, clobber_username=True):
+ self.app = app
+ self.ip_map = []
+ for key, value in ip_map.items():
+ self.ip_map.append((ip4.IP4Range(key),
+ self._convert_user_role(value[0], value[1])))
+ self.clobber_username = clobber_username
+
+ def _convert_user_role(self, username, roles):
+ if roles and isinstance(roles, basestring):
+ roles = roles.split(',')
+ return (username, roles)
+
+ def __call__(self, environ, start_response):
+ addr = ip4.ip2int(environ['REMOTE_ADDR'], False)
+ remove_user = False
+ add_roles = []
+ for range, (username, roles) in self.ip_map:
+ if addr in range:
+ if roles:
+ add_roles.extend(roles)
+ if username == '__remove__':
+ remove_user = True
+ elif username:
+ if (not environ.get('REMOTE_USER')
+ or self.clobber_username):
+ environ['REMOTE_USER'] = username
+ if (remove_user and 'REMOTE_USER' in environ):
+ del environ['REMOTE_USER']
+ if roles:
+ self._set_roles(environ, add_roles)
+ return self.app(environ, start_response)
+
+ def _set_roles(self, environ, roles):
+ cur_roles = environ.get('REMOTE_USER_TOKENS', '').split(',')
+ # Get rid of empty roles:
+ cur_roles = filter(None, cur_roles)
+ remove_roles = []
+ for role in roles:
+ if role.startswith('-'):
+ remove_roles.append(role[1:])
+ else:
+ if role not in cur_roles:
+ cur_roles.append(role)
+ for role in remove_roles:
+ if role in cur_roles:
+ cur_roles.remove(role)
+ environ['REMOTE_USER_TOKENS'] = ','.join(cur_roles)
+
+
+def make_grantip(app, global_conf, clobber_username=False, **kw):
+ """
+ Grant roles or usernames based on IP addresses.
+
+ Config looks like this::
+
+ [filter:grant]
+ use = egg:Paste#grantip
+ clobber_username = true
+ # Give localhost system role (no username):
+ 127.0.0.1 = -:system
+ # Give everyone in 192.168.0.* editor role:
+ 192.168.0.0/24 = -:editor
+ # Give one IP the username joe:
+ 192.168.0.7 = joe
+ # And one IP is should not be logged in:
+ 192.168.0.10 = __remove__:-editor
+
+ """
+ from paste.deploy.converters import asbool
+ clobber_username = asbool(clobber_username)
+ ip_map = {}
+ for key, value in kw.items():
+ if ':' in value:
+ username, role = value.split(':', 1)
+ else:
+ username = value
+ role = ''
+ if username == '-':
+ username = ''
+ if role == '-':
+ role = ''
+ ip_map[key] = value
+ return GrantIPMiddleware(app, ip_map, clobber_username)
+
+
diff --git a/paste/auth/multi.py b/paste/auth/multi.py
new file mode 100644
index 0000000..b378fa6
--- /dev/null
+++ b/paste/auth/multi.py
@@ -0,0 +1,79 @@
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+Authentication via Multiple Methods
+
+In some environments, the choice of authentication method to be used
+depends upon the environment and is not "fixed". This middleware allows
+N authentication methods to be registered along with a goodness function
+which determines which method should be used. The following example
+demonstrates how to use both form and digest authentication in a server
+stack; by default it uses form-based authentication unless
+``*authmeth=digest`` is specified as a query argument.
+
+>>> from paste.auth import form, cookie, digest, multi
+>>> from paste.wsgilib import dump_environ
+>>> from paste.httpserver import serve
+>>>
+>>> multi = multi.MultiHandler(dump_environ)
+>>> def authfunc(environ, realm, user):
+... return digest.digest_password(realm, user, user)
+>>> multi.add_method('digest', digest.middleware, "Test Realm", authfunc)
+>>> multi.set_query_argument('digest')
+>>>
+>>> def authfunc(environ, username, password):
+... return username == password
+>>> multi.add_method('form', form.middleware, authfunc)
+>>> multi.set_default('form')
+>>> serve(cookie.middleware(multi))
+serving on...
+
+"""
+
+class MultiHandler(object):
+ """
+ Multiple Authentication Handler
+
+    This middleware provides two orthogonal facilities:
+
+ - a manner to register any number of authentication middlewares
+
+ - a mechanism to register predicates which cause one of the
+ registered middlewares to be used depending upon the request
+
+ If none of the predicates returns True, then the application is
+ invoked directly without middleware
+ """
+ def __init__(self, application):
+ self.application = application
+ self.default = application
+ self.binding = {}
+ self.predicate = []
+ def add_method(self, name, factory, *args, **kwargs):
+ self.binding[name] = factory(self.application, *args, **kwargs)
+ def add_predicate(self, name, checker):
+ self.predicate.append((checker, self.binding[name]))
+ def set_default(self, name):
+ """ set default authentication method """
+ self.default = self.binding[name]
+ def set_query_argument(self, name, key = '*authmeth', value = None):
+ """ choose authentication method based on a query argument """
+ lookfor = "%s=%s" % (key, value or name)
+ self.add_predicate(name,
+ lambda environ: lookfor in environ.get('QUERY_STRING',''))
+ def __call__(self, environ, start_response):
+ for (checker, binding) in self.predicate:
+ if checker(environ):
+ return binding(environ, start_response)
+ return self.default(environ, start_response)
+
+middleware = MultiHandler
+
+__all__ = ['MultiHandler']
+
+if "__main__" == __name__:
+ import doctest
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
+
diff --git a/paste/auth/open_id.py b/paste/auth/open_id.py
new file mode 100644
index 0000000..f6efe61
--- /dev/null
+++ b/paste/auth/open_id.py
@@ -0,0 +1,412 @@
+# (c) 2005 Ben Bangert
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+OpenID Authentication (Consumer)
+
+OpenID is a distributed authentication system for single sign-on originally
+developed at/for LiveJournal.com.
+
+ http://openid.net/
+
In OpenID your identity is simply a URL. You can have multiple identities in
the same way you can have multiple URLs. All OpenID does is provide a way to
prove that you own a URL (identity).
+And it does this without passing around your password, your email address, or
+anything you don't want it to. There's no profile exchange component at all:
your profile is your identity URL, but recipients of your identity can then
+learn more about you from any public, semantically interesting documents
+linked thereunder (FOAF, RSS, Atom, vCARD, etc.).
+
+``Note``: paste.auth.openid requires installation of the Python-OpenID
+libraries::
+
+ http://www.openidenabled.com/
+
+This module is based highly off the consumer.py that Python OpenID comes with.
+
+Using the OpenID Middleware
+===========================
+
+Using the OpenID middleware is fairly easy, the most minimal example using the
basic login form that's included::
+
+ # Add to your wsgi app creation
+ from paste.auth import open_id
+
+ wsgi_app = open_id.middleware(wsgi_app, '/somewhere/to/store/openid/data')
+
+You will now have the OpenID form available at /oid on your site. Logging in will
+verify that the login worked.
+
+A more complete login should involve having the OpenID middleware load your own
+login page after verifying the OpenID URL so that you can retain the login
+information in your webapp (session, cookies, etc.)::
+
+ wsgi_app = open_id.middleware(wsgi_app, '/somewhere/to/store/openid/data',
+ login_redirect='/your/login/code')
+
+Your login code should then be configured to retrieve 'paste.auth.open_id' for
+the users OpenID URL. If this key does not exist, the user has not logged in.
+
+Once the login is retrieved, it should be saved in your webapp, and the user
+should be redirected to wherever they would normally go after a successful
+login.
+"""
+
+__all__ = ['AuthOpenIDHandler']
+
+import cgi
+import urlparse
+import re
+
+import paste.request
+from paste import httpexceptions
+
def quoteattr(s):
    """Return *s* HTML-escaped (including quote characters) and wrapped in
    double quotes, suitable for use as an HTML attribute value."""
    escaped = cgi.escape(s, 1)
    return '"%s"' % (escaped,)
+
+# You may need to manually add the openid package into your
+# python path if you don't have it installed with your system python.
+# If so, uncomment the line below, and change the path where you have
+# Python-OpenID.
+# sys.path.append('/path/to/openid/')
+
+from openid.store import filestore
+from openid.consumer import consumer
+from openid.oidutil import appendArgs
+
class AuthOpenIDHandler(object):
    """
    This middleware implements OpenID Consumer behavior to authenticate a
    URL against an OpenID Server.
    """

    def __init__(self, app, data_store_path, auth_prefix='/oid',
                 login_redirect=None, catch_401=False,
                 url_to_username=None):
        """
        Initialize the OpenID middleware

        ``app``
            Your WSGI app to call

        ``data_store_path``
            Directory to store crypto data in for use with OpenID servers.

        ``auth_prefix``
            Location for authentication process/verification

        ``login_redirect``
            Location to load after successful process of login

        ``catch_401``
            If true, then any 401 responses will turn into open ID login
            requirements.

        ``url_to_username``
            A function called like ``url_to_username(environ, url)``, which should
            return a string username.  If not given, the URL will be the username.
        """
        store = filestore.FileOpenIDStore(data_store_path)
        self.oidconsumer = consumer.OpenIDConsumer(store)

        self.app = app
        self.auth_prefix = auth_prefix
        self.data_store_path = data_store_path
        self.login_redirect = login_redirect
        self.catch_401 = catch_401
        self.url_to_username = url_to_username

    def __call__(self, environ, start_response):
        """WSGI entry point: handle requests under ``auth_prefix`` itself,
        pass everything else through to the wrapped application."""
        if environ['PATH_INFO'].startswith(self.auth_prefix):
            # Let's load everything into a request dict to pass around easier
            request = dict(environ=environ, start=start_response, body=[])
            request['base_url'] = paste.request.construct_url(
                environ, with_path_info=False, with_query_string=False)

            # NOTE(review): the prefix is treated as a regular expression
            # and removed wherever it first matches, not anchored to the
            # start of PATH_INFO -- confirm prefixes never contain regex
            # metacharacters.
            path = re.sub(self.auth_prefix, '', environ['PATH_INFO'])
            request['parsed_uri'] = urlparse.urlparse(path)
            request['query'] = dict(paste.request.parse_querystring(environ))

            path = request['parsed_uri'][2]
            if path == '/' or not path:
                return self.render(request)
            elif path == '/verify':
                return self.do_verify(request)
            elif path == '/process':
                return self.do_process(request)
            else:
                return self.not_found(request)
        else:
            if self.catch_401:
                return self.catch_401_app_call(environ, start_response)
            return self.app(environ, start_response)

    def catch_401_app_call(self, environ, start_response):
        """
        Call the application, and redirect if the app returns a 401 response
        """
        was_401 = []
        def replacement_start_response(status, headers, exc_info=None):
            # The numeric code is the first whitespace-separated token of
            # the status string.  (Bug fix: the original passed the whole
            # list returned by split() to int(), raising TypeError on
            # every response.)
            if int(status.split(None, 1)[0]) == 401:
                # @@: Do I need to append something to go back to where we
                # came from?
                was_401.append(1)
                def dummy_writer(v):
                    pass
                return dummy_writer
            else:
                return start_response(status, headers, exc_info)
        app_iter = self.app(environ, replacement_start_response)
        if was_401:
            # Consume and discard the 401 body, then redirect the browser
            # to this server's base URL (the login form) instead.
            try:
                list(app_iter)
            finally:
                if hasattr(app_iter, 'close'):
                    app_iter.close()
            redir_url = paste.request.construct_url(
                environ, with_path_info=False, with_query_string=False)
            exc = httpexceptions.HTTPTemporaryRedirect(redir_url)
            return exc.wsgi_application(environ, start_response)
        else:
            return app_iter

    def do_verify(self, request):
        """Process the form submission, initiating OpenID verification.
        """

        # First, make sure that the user entered something
        openid_url = request['query'].get('openid_url')
        if not openid_url:
            return self.render(request, 'Enter an identity URL to verify.',
                               css_class='error', form_contents=openid_url)

        oidconsumer = self.oidconsumer

        # Then, ask the library to begin the authorization.
        # Here we find out the identity server that will verify the
        # user's identity, and get a token that allows us to
        # communicate securely with the identity server.
        status, info = oidconsumer.beginAuth(openid_url)

        # If the URL was unusable (either because of network
        # conditions, a server error, or that the response returned
        # was not an OpenID identity page), the library will return
        # an error code. Let the user know that that URL is unusable.
        if status in [consumer.HTTP_FAILURE, consumer.PARSE_ERROR]:
            if status == consumer.HTTP_FAILURE:
                fmt = 'Failed to retrieve <q>%s</q>'
            else:
                fmt = 'Could not find OpenID information in <q>%s</q>'

            message = fmt % (cgi.escape(openid_url),)
            return self.render(request, message, css_class='error',
                               form_contents=openid_url)
        elif status == consumer.SUCCESS:
            # The URL was a valid identity URL. Now we construct a URL
            # that will get us to process the server response. We will
            # need the token from the beginAuth call when processing
            # the response. A cookie or a session object could be used
            # to accomplish this, but for simplicity here we just add
            # it as a query parameter of the return-to URL.
            return_to = self.build_url(request, 'process', token=info.token)

            # Now ask the library for the URL to redirect the user to
            # his OpenID server. It is required for security that the
            # return_to URL must be under the specified trust_root. We
            # just use the base_url for this server as a trust root.
            redirect_url = oidconsumer.constructRedirect(
                info, return_to, trust_root=request['base_url'])

            # Send the redirect response
            return self.redirect(request, redirect_url)
        else:
            assert False, 'Not reached'

    def do_process(self, request):
        """Handle the redirect from the OpenID server.
        """
        oidconsumer = self.oidconsumer

        # retrieve the token from the environment (in this case, the URL)
        token = request['query'].get('token', '')

        # Ask the library to check the response that the server sent
        # us. Status is a code indicating the response type. info is
        # either None or a string containing more information about
        # the return type.
        status, info = oidconsumer.completeAuth(token, request['query'])

        css_class = 'error'
        openid_url = None
        if status == consumer.FAILURE and info:
            # In the case of failure, if info is non-None, it is the
            # URL that we were verifying. We include it in the error
            # message to help the user figure out what happened.
            openid_url = info
            fmt = "Verification of %s failed."
            message = fmt % (cgi.escape(openid_url),)
        elif status == consumer.SUCCESS:
            # Success means that the transaction completed without
            # error. If info is None, it means that the user cancelled
            # the verification.
            css_class = 'alert'
            if info:
                # This is a successful verification attempt. If this
                # was a real application, we would do our login,
                # comment posting, etc. here.
                openid_url = info
                if self.url_to_username:
                    username = self.url_to_username(request['environ'], openid_url)
                else:
                    username = openid_url
                # Cooperate with auth_tkt middleware if it is installed
                # above us in the stack.
                if 'paste.auth_tkt.set_user' in request['environ']:
                    request['environ']['paste.auth_tkt.set_user'](username)
                if not self.login_redirect:
                    fmt = ("If you had supplied a login redirect path, you would have "
                           "been redirected there.  "
                           "You have successfully verified %s as your identity.")
                    message = fmt % (cgi.escape(openid_url),)
                else:
                    # @@: This stuff doesn't make sense to me; why not a remote redirect?
                    request['environ']['paste.auth.open_id'] = openid_url
                    request['environ']['PATH_INFO'] = self.login_redirect
                    return self.app(request['environ'], request['start'])
                    #exc = httpexceptions.HTTPTemporaryRedirect(self.login_redirect)
                    #return exc.wsgi_application(request['environ'], request['start'])
            else:
                # cancelled
                message = 'Verification cancelled'
        else:
            # Either we don't understand the code or there is no
            # openid_url included with the error. Give a generic
            # failure message. The library should supply debug
            # information in a log.
            message = 'Verification failed.'

        return self.render(request, message, css_class, openid_url)

    def build_url(self, request, action, **query):
        """Build a URL relative to the server base_url, with the given
        query parameters added."""
        base = urlparse.urljoin(request['base_url'], self.auth_prefix + '/' + action)
        return appendArgs(base, query)

    def redirect(self, request, redirect_url):
        """Send a redirect response to the given URL to the browser."""
        response_headers = [('Content-type', 'text/plain'),
                            ('Location', redirect_url)]
        request['start']('302 REDIRECT', response_headers)
        return ["Redirecting to %s" % redirect_url]

    def not_found(self, request):
        """Render a page with a 404 return code and a message."""
        fmt = 'The path <q>%s</q> was not understood by this server.'
        msg = fmt % (request['parsed_uri'],)
        openid_url = request['query'].get('openid_url')
        return self.render(request, msg, 'error', openid_url, status='404 Not Found')

    def render(self, request, message=None, css_class='alert', form_contents=None,
               status='200 OK', title="Python OpenID Consumer"):
        """Render a page: header, optional status ``message`` box, and the
        verification form footer.  Returns the accumulated body list."""
        response_headers = [('Content-type', 'text/html')]
        request['start'](str(status), response_headers)

        self.page_header(request, title)
        if message:
            request['body'].append("<div class='%s'>" % (css_class,))
            request['body'].append(message)
            request['body'].append("</div>")
        self.page_footer(request, form_contents)
        return request['body']

    def page_header(self, request, title):
        """Render the page header"""
        request['body'].append('''\
<html>
  <head><title>%s</title></head>
  <style type="text/css">
      * {
        font-family: verdana,sans-serif;
      }
      body {
        width: 50em;
        margin: 1em;
      }
      div {
        padding: .5em;
      }
      table {
        margin: none;
        padding: none;
      }
      .alert {
        border: 1px solid #e7dc2b;
        background: #fff888;
      }
      .error {
        border: 1px solid #ff0000;
        background: #ffaaaa;
      }
      #verify-form {
        border: 1px solid #777777;
        background: #dddddd;
        margin-top: 1em;
        padding-bottom: 0em;
      }
  </style>
  <body>
    <h1>%s</h1>
    <p>
      This example consumer uses the <a
      href="http://openid.schtuff.com/">Python OpenID</a> library. It
      just verifies that the URL that you enter is your identity URL.
    </p>
''' % (title, title))

    def page_footer(self, request, form_contents):
        """Render the page footer"""
        if not form_contents:
            form_contents = ''

        request['body'].append('''\
    <div id="verify-form">
      <form method="get" action=%s>
        Identity&nbsp;URL:
        <input type="text" name="openid_url" value=%s />
        <input type="submit" value="Verify" />
      </form>
    </div>
  </body>
</html>
''' % (quoteattr(self.build_url(request, 'verify')), quoteattr(form_contents)))
+
+
# Conventional alias used by the other paste.auth modules.
middleware = AuthOpenIDHandler

def make_open_id_middleware(
    app,
    global_conf,
    # Should this default to something, or inherit something from global_conf?:
    data_store_path,
    auth_prefix='/oid',
    login_redirect=None,
    catch_401=False,
    url_to_username=None,
    apply_auth_tkt=False,
    auth_tkt_logout_path=None):
    """Paste Deploy entry point: wrap `app` in AuthOpenIDHandler and,
    when `apply_auth_tkt` is true, layer auth_tkt cookie middleware on top."""
    from paste.deploy.converters import asbool
    from paste.util import import_string
    catch_401 = asbool(catch_401)
    # Config files supply url_to_username as a dotted-name string; resolve
    # it to the actual callable.
    if url_to_username and isinstance(url_to_username, basestring):
        url_to_username = import_string.eval_import(url_to_username)
    apply_auth_tkt = asbool(apply_auth_tkt)
    wrapped = AuthOpenIDHandler(
        app, data_store_path=data_store_path, auth_prefix=auth_prefix,
        login_redirect=login_redirect, catch_401=catch_401,
        url_to_username=url_to_username or None)
    if apply_auth_tkt:
        from paste.auth import auth_tkt
        wrapped = auth_tkt.make_auth_tkt_middleware(
            wrapped, global_conf, logout_path=auth_tkt_logout_path)
    return wrapped
diff --git a/paste/cascade.py b/paste/cascade.py
new file mode 100644
index 0000000..1c4acfb
--- /dev/null
+++ b/paste/cascade.py
@@ -0,0 +1,133 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Cascades through several applications, so long as applications
+return ``404 Not Found``.
+"""
+from paste import httpexceptions
+from paste.util import converters
+import tempfile
+from cStringIO import StringIO
+
+__all__ = ['Cascade']
+
def make_cascade(loader, global_conf, catch='404', **local_conf):
    """
    Entry point for Paste Deploy configuration

    Expects configuration like::

        [composit:cascade]
        use = egg:Paste#cascade
        # all start with 'app' and are sorted alphabetically
        app1 = foo
        app2 = bar
        ...
        catch = 404 500 ...
    """
    codes = [int(code) for code in converters.aslist(catch)]
    named_apps = []
    for key, spec in local_conf.items():
        if not key.startswith('app'):
            raise ValueError(
                "Bad configuration key %r (=%r); all configuration keys "
                "must start with 'app'"
                % (key, spec))
        named_apps.append((key, loader.get_app(spec, global_conf=global_conf)))
    # Alphabetical order of the 'appN' keys determines cascade order.
    named_apps.sort()
    return Cascade([app for _, app in named_apps], catch=codes)
+
class Cascade(object):

    """
    Passed a list of applications, ``Cascade`` will try each of them
    in turn. If one returns a status code listed in ``catch`` (by
    default just ``404 Not Found``) then the next application is
    tried.

    If all applications fail, then the last application's failure
    response is used.

    Instances of this class are WSGI applications.
    """

    def __init__(self, applications, catch=(404,)):
        """
        ``applications``: WSGI apps, tried in order.
        ``catch``: status codes that trigger falling through to the next
        app; each entry may be an int, a status string like '404 Not
        Found', or an HTTPException instance.
        """
        self.apps = applications
        # catch_codes maps int code -> exception class; catch_exceptions is
        # the matching tuple so *raised* httpexceptions are also caught.
        self.catch_codes = {}
        self.catch_exceptions = []
        for error in catch:
            if isinstance(error, str):
                # e.g. '404 Not Found' -> 404
                error = int(error.split(None, 1)[0])
            if isinstance(error, httpexceptions.HTTPException):
                exc = error
                code = error.code
            else:
                exc = httpexceptions.get_exception(error)
                code = error
            self.catch_codes[code] = exc
            self.catch_exceptions.append(exc)
        self.catch_exceptions = tuple(self.catch_exceptions)

    def __call__(self, environ, start_response):
        """
        WSGI application interface
        """
        # `failed` is a flag-list: repl_start_response appends to it when
        # the current app responded with a caught status code.
        failed = []
        def repl_start_response(status, headers, exc_info=None):
            code = int(status.split(None, 1)[0])
            if code in self.catch_codes:
                failed.append(None)
                # Swallow the failed response's body.
                return _consuming_writer
            return start_response(status, headers, exc_info)

        try:
            length = int(environ.get('CONTENT_LENGTH', 0) or 0)
        except ValueError:
            length = 0
        if length > 0:
            # We have to copy wsgi.input
            # (each app may consume the request body, so buffer it once and
            # rewind before every attempt; large/unknown bodies go to disk)
            copy_wsgi_input = True
            if length > 4096 or length < 0:
                f = tempfile.TemporaryFile()
                if length < 0:
                    f.write(environ['wsgi.input'].read())
                else:
                    copy_len = length
                    while copy_len > 0:
                        chunk = environ['wsgi.input'].read(min(copy_len, 4096))
                        if not chunk:
                            raise IOError("Request body truncated")
                        f.write(chunk)
                        copy_len -= len(chunk)
                f.seek(0)
            else:
                f = StringIO(environ['wsgi.input'].read(length))
            environ['wsgi.input'] = f
        else:
            copy_wsgi_input = False
        for app in self.apps[:-1]:
            # Give each app a fresh environ copy so mutations don't leak
            # into later attempts.
            environ_copy = environ.copy()
            if copy_wsgi_input:
                environ_copy['wsgi.input'].seek(0)
            failed = []
            try:
                v = app(environ_copy, repl_start_response)
                if not failed:
                    return v
                else:
                    if hasattr(v, 'close'):
                        # Exhaust the iterator first:
                        list(v)
                        # then close:
                        v.close()
            except self.catch_exceptions, e:
                pass
        # Last app: called with the real environ/start_response, so its
        # response (success or failure) goes to the client unmodified.
        if copy_wsgi_input:
            environ['wsgi.input'].seek(0)
        return self.apps[-1](environ, start_response)
+
def _consuming_writer(s):
    # WSGI write() callable handed to apps whose response is being
    # discarded (status matched ``catch``); silently swallows all output.
    pass
diff --git a/paste/cgiapp.py b/paste/cgiapp.py
new file mode 100644
index 0000000..8801750
--- /dev/null
+++ b/paste/cgiapp.py
@@ -0,0 +1,277 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Application that runs a CGI script.
+"""
+import os
+import sys
+import subprocess
+import urllib
+try:
+ import select
+except ImportError:
+ select = None
+
+from paste.util import converters
+
+__all__ = ['CGIError', 'CGIApplication']
+
class CGIError(Exception):
    """
    Raised when the CGI script can't be found or doesn't
    act like a proper CGI script (e.g. emits a malformed header line).
    """
+
class CGIApplication(object):

    """
    This object acts as a proxy to a CGI application. You pass in the
    script path (``script``), an optional path to search for the
    script (if the name isn't absolute) (``path``). If you don't give
    a path, then ``$PATH`` will be used.
    """

    def __init__(self,
                 global_conf,
                 script,
                 path=None,
                 include_os_environ=True,
                 query_string=None):
        """
        ``global_conf`` must be None (legacy parameter; use
        make_cgi_application for Paste Deploy).  ``script`` may embed a
        query string after '?', mutually exclusive with ``query_string``.

        Raises NotImplementedError when global_conf is supplied and
        CGIError when the script cannot be located.
        """
        if global_conf:
            # Bug fix: ``NotImplemented`` is a plain non-callable sentinel,
            # so ``raise NotImplemented(...)`` crashed with TypeError; the
            # exception type intended here is NotImplementedError.
            raise NotImplementedError(
                "global_conf is no longer supported for CGIApplication "
                "(use make_cgi_application); please pass None instead")
        # Note: script_filename keeps the original value, including any
        # embedded '?query'.
        self.script_filename = script
        if path is None:
            path = os.environ.get('PATH', '').split(':')
        self.path = path
        if '?' in script:
            assert query_string is None, (
                "You cannot have '?' in your script name (%r) and also "
                "give a query_string (%r)" % (script, query_string))
            script, query_string = script.split('?', 1)
        if os.path.abspath(script) != script:
            # relative path: search each directory on self.path
            for path_dir in self.path:
                if os.path.exists(os.path.join(path_dir, script)):
                    self.script = os.path.join(path_dir, script)
                    break
            else:
                raise CGIError(
                    "Script %r not found in path %r"
                    % (script, self.path))
        else:
            self.script = script
        self.include_os_environ = include_os_environ
        self.query_string = query_string

    def __call__(self, environ, start_response):
        """WSGI interface: build a CGI environment, run the script, and
        translate its output into a WSGI response via CGIWriter."""
        if 'REQUEST_URI' not in environ:
            environ['REQUEST_URI'] = (
                urllib.quote(environ.get('SCRIPT_NAME', ''))
                + urllib.quote(environ.get('PATH_INFO', '')))
        if self.include_os_environ:
            cgi_environ = os.environ.copy()
        else:
            cgi_environ = {}
        for name in environ:
            # Should unicode values be encoded?
            # Only copy UPPERCASE string-valued keys (the CGI variables);
            # wsgi.* entries are skipped.
            if (name.upper() == name
                    and isinstance(environ[name], str)):
                cgi_environ[name] = environ[name]
        if self.query_string is not None:
            old = cgi_environ.get('QUERY_STRING', '')
            if old:
                old += '&'
            cgi_environ['QUERY_STRING'] = old + self.query_string
        cgi_environ['SCRIPT_FILENAME'] = self.script
        proc = subprocess.Popen(
            [self.script],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=cgi_environ,
            cwd=os.path.dirname(self.script),
            )
        writer = CGIWriter(environ, start_response)
        if select and sys.platform != 'win32':
            # Stream via select() so large request/response bodies can't
            # deadlock the pipes.
            proc_communicate(
                proc,
                stdin=StdinReader.from_environ(environ),
                stdout=writer,
                stderr=environ['wsgi.errors'])
        else:
            stdout, stderr = proc.communicate(StdinReader.from_environ(environ).read())
            if stderr:
                environ['wsgi.errors'].write(stderr)
            writer.write(stdout)
        if not writer.headers_finished:
            # Script produced no complete header block; send the defaults.
            start_response(writer.status, writer.headers)
        return []
+
class CGIWriter(object):
    """Adapts a CGI script's stdout to WSGI: buffers and parses the header
    block (including the CGI ``Status:`` header), calls start_response at
    the blank line, then streams the body through the returned writer."""

    def __init__(self, environ, start_response):
        self.environ = environ
        self.start_response = start_response
        # Defaults used when the script supplies no Status header.
        self.status = '200 OK'
        self.headers = []
        self.headers_finished = False
        self.writer = None
        self.buffer = ''

    def write(self, data):
        """Feed a chunk of script output; raises CGIError on a malformed
        header line."""
        if self.headers_finished:
            self.writer(data)
            return
        self.buffer += data
        while '\n' in self.buffer:
            # Accept CRLF or bare LF line endings, whichever comes first.
            if '\r\n' in self.buffer and self.buffer.find('\r\n') < self.buffer.find('\n'):
                line1, self.buffer = self.buffer.split('\r\n', 1)
            else:
                line1, self.buffer = self.buffer.split('\n', 1)
            if not line1:
                # Blank line: headers are complete; start the response and
                # flush whatever body bytes are already buffered.
                self.headers_finished = True
                self.writer = self.start_response(
                    self.status, self.headers)
                self.writer(self.buffer)
                # Drop parsing state; only self.writer is needed from now on.
                del self.buffer
                del self.headers
                del self.status
                break
            elif ':' not in line1:
                raise CGIError(
                    "Bad header line: %r" % line1)
            else:
                name, value = line1.split(':', 1)
                value = value.lstrip()
                name = name.strip()
                if name.lower() == 'status':
                    if ' ' not in value:
                        # WSGI requires this space, sometimes CGI scripts don't set it:
                        value = '%s General' % value
                    self.status = value
                else:
                    self.headers.append((name, value))
+
class StdinReader(object):
    """File-like wrapper over ``wsgi.input`` that never reads past the
    request's CONTENT_LENGTH (many servers don't EOF the input stream)."""

    def __init__(self, stdin, content_length):
        self.stdin = stdin
        self.content_length = content_length

    @classmethod
    def from_environ(cls, environ):
        """Build a reader from a WSGI environ; a missing or empty
        CONTENT_LENGTH is treated as a zero-length body."""
        raw = environ.get('CONTENT_LENGTH')
        return cls(environ['wsgi.input'], int(raw) if raw else 0)

    def read(self, size=None):
        """Read up to ``size`` bytes (all remaining when size is None),
        bounded by the bytes still owed by CONTENT_LENGTH."""
        if not self.content_length:
            return ''
        if size is None:
            limit = self.content_length
        else:
            limit = min(self.content_length, size)
        data = self.stdin.read(limit)
        self.content_length -= len(data)
        return data
+
def proc_communicate(proc, stdin=None, stdout=None, stderr=None):
    """
    Run the given process, piping input/output/errors to the given
    file-like objects (which need not be actual file objects, unlike
    the arguments passed to Popen). Wait for process to terminate.

    ``stdout``/``stderr`` must be given exactly when the corresponding
    pipe was requested on ``proc`` (asserted below); ``stdin`` may be
    None to send immediate EOF even when proc.stdin is a pipe.

    Note: this is taken from the posix version of
    subprocess.Popen.communicate, but made more general through the
    use of file-like objects.
    """
    read_set = []
    write_set = []
    # Bytes literal so leftover-chunk bookkeeping also works on Python 3
    # (identical to '' on Python 2).
    input_buffer = b''
    trans_nl = proc.universal_newlines and hasattr(open, 'newlines')

    if proc.stdin:
        # Flush stdio buffer. This might block, if the user has
        # been writing to .stdin in an uncontrolled fashion.
        proc.stdin.flush()
        # Bug fix: this condition tested the *builtin* ``input`` (always
        # truthy), so passing stdin=None with an open proc.stdin later
        # crashed on ``None.read``; test the actual ``stdin`` argument.
        if stdin:
            write_set.append(proc.stdin)
        else:
            proc.stdin.close()
    else:
        assert stdin is None
    if proc.stdout:
        read_set.append(proc.stdout)
    else:
        assert stdout is None
    if proc.stderr:
        read_set.append(proc.stderr)
    else:
        assert stderr is None

    while read_set or write_set:
        rlist, wlist, xlist = select.select(read_set, write_set, [])

        if proc.stdin in wlist:
            # When select has indicated that the file is writable,
            # we can write up to PIPE_BUF bytes without risk
            # blocking. POSIX defines PIPE_BUF >= 512
            next, input_buffer = input_buffer, b''
            next_len = 512 - len(next)
            if next_len:
                next += stdin.read(next_len)
            if not next:
                # Source exhausted: signal EOF to the child.
                proc.stdin.close()
                write_set.remove(proc.stdin)
            else:
                bytes_written = os.write(proc.stdin.fileno(), next)
                if bytes_written < len(next):
                    # Keep the unwritten tail for the next round.
                    input_buffer = next[bytes_written:]

        if proc.stdout in rlist:
            data = os.read(proc.stdout.fileno(), 1024)
            # An empty read means EOF.  (Bug fix: comparing to "" never
            # matched the b"" os.read returns on Python 3, spinning forever.)
            if not data:
                proc.stdout.close()
                read_set.remove(proc.stdout)
            if trans_nl:
                data = proc._translate_newlines(data)
            stdout.write(data)

        if proc.stderr in rlist:
            data = os.read(proc.stderr.fileno(), 1024)
            if not data:
                proc.stderr.close()
                read_set.remove(proc.stderr)
            if trans_nl:
                data = proc._translate_newlines(data)
            stderr.write(data)

    try:
        proc.wait()
    except OSError as e:
        # errno 10 == ECHILD: the child was already reaped; that's fine.
        if e.errno != 10:
            raise
+
def make_cgi_application(global_conf, script, path=None, include_os_environ=None,
                         query_string=None):
    """
    Paste Deploy interface for :class:`CGIApplication`

    This object acts as a proxy to a CGI application. You pass in the
    script path (``script``), an optional path to search for the
    script (if the name isn't absolute) (``path``). If you don't give
    a path, then ``$PATH`` will be used.
    """
    if path is None:
        # Allow the search path to come from the global config (either key).
        path = global_conf.get('path') or global_conf.get('PATH')
    # NOTE(review): asbool(None) evaluates False, so omitting
    # include_os_environ in the config yields False here even though
    # CGIApplication's own default is True -- confirm this is intended.
    include_os_environ = converters.asbool(include_os_environ)
    return CGIApplication(
        None,
        script, path=path, include_os_environ=include_os_environ,
        query_string=query_string)
diff --git a/paste/cgitb_catcher.py b/paste/cgitb_catcher.py
new file mode 100644
index 0000000..1c815b7
--- /dev/null
+++ b/paste/cgitb_catcher.py
@@ -0,0 +1,116 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+WSGI middleware
+
+Captures any exceptions and prints a pretty report. See the `cgitb
+documentation <http://python.org/doc/current/lib/module-cgitb.html>`_
+for more.
+"""
+
+import cgitb
+from cStringIO import StringIO
+import sys
+
+from paste.util import converters
+
class NoDefault(object):
    # Sentinel type: distinguishes "argument not supplied" from an
    # explicit None/False value.
    pass

class CgitbMiddleware(object):
    """WSGI middleware that catches exceptions (at call time and while the
    response iterates) and renders a ``cgitb`` report as the response."""

    def __init__(self, app,
                 global_conf=None,
                 display=NoDefault,
                 logdir=None,
                 context=5,
                 format="html"):
        """
        ``display``: show tracebacks in the browser; defaults to the
        ``debug`` entry of ``global_conf``.  ``logdir``: directory where
        cgitb writes error logs.  ``context``: lines of source context.
        ``format``: 'html' or plain text.
        """
        self.app = app
        if global_conf is None:
            global_conf = {}
        if display is NoDefault:
            display = global_conf.get('debug')
        if isinstance(display, basestring):
            display = converters.asbool(display)
        self.display = display
        self.logdir = logdir
        self.context = int(context)
        self.format = format

    def __call__(self, environ, start_response):
        try:
            app_iter = self.app(environ, start_response)
            return self.catching_iter(app_iter, environ)
        except:
            # Intentionally broad: this middleware's whole job is to render
            # any uncaught exception.
            exc_info = sys.exc_info()
            start_response('500 Internal Server Error',
                           [('content-type', 'text/html')],
                           exc_info)
            response = self.exception_handler(exc_info, environ)
            return [response]

    def catching_iter(self, app_iter, environ):
        """Yield from ``app_iter``, converting mid-stream exceptions (and
        errors raised by .close()) into a rendered report chunk."""
        if not app_iter:
            # Bug fix: this was ``raise StopIteration``, which PEP 479
            # (Python 3.7+) converts to RuntimeError inside a generator;
            # a plain return ends the generator identically.
            return
        error_on_close = False
        try:
            for v in app_iter:
                yield v
            if hasattr(app_iter, 'close'):
                error_on_close = True
                app_iter.close()
        except:
            response = self.exception_handler(sys.exc_info(), environ)
            if not error_on_close and hasattr(app_iter, 'close'):
                try:
                    app_iter.close()
                except:
                    close_response = self.exception_handler(
                        sys.exc_info(), environ)
                    response += (
                        '<hr noshade>Error in .close():<br>%s'
                        % close_response)
            yield response

    def exception_handler(self, exc_info, environ):
        """Format ``exc_info`` with cgitb and return the rendered text."""
        dummy_file = StringIO()
        hook = cgitb.Hook(file=dummy_file,
                          display=self.display,
                          logdir=self.logdir,
                          context=self.context,
                          format=self.format)
        hook(*exc_info)
        return dummy_file.getvalue()
+
def make_cgitb_middleware(app, global_conf,
                          display=NoDefault,
                          logdir=None,
                          context=5,
                          format='html'):
    """
    Wraps the application in the ``cgitb`` (standard library)
    error catcher.

    display:
        If true (or debug is set in the global configuration)
        then the traceback will be displayed in the browser

    logdir:
        Writes logs of all errors in that directory

    context:
        Number of lines of context to show around each line of
        source code
    """
    from paste.deploy.converters import asbool
    # An explicitly supplied display flag is coerced; otherwise the
    # NoDefault sentinel is passed through so CgitbMiddleware falls back
    # to the global 'debug' setting.
    if display is not NoDefault:
        display = asbool(display)
    if 'debug' in global_conf:
        global_conf['debug'] = asbool(global_conf['debug'])
    return CgitbMiddleware(app,
                           global_conf=global_conf,
                           display=display,
                           logdir=logdir,
                           context=context,
                           format=format)
diff --git a/paste/config.py b/paste/config.py
new file mode 100644
index 0000000..c531579
--- /dev/null
+++ b/paste/config.py
@@ -0,0 +1,120 @@
+# (c) 2006 Ian Bicking, Philip Jenvey and contributors
+# Written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""Paste Configuration Middleware and Objects"""
+from paste.registry import RegistryManager, StackedObjectProxy
+
+__all__ = ['DispatchingConfig', 'CONFIG', 'ConfigMiddleware']
+
class DispatchingConfig(StackedObjectProxy):
    """
    This is a configuration object that can be used globally,
    imported, have references held onto. The configuration may differ
    by thread (or may not).

    Specific configurations are registered (and deregistered) either
    for the process or for threads.
    """
    # @@: What should happen when someone tries to add this
    # configuration to itself? Probably the conf should become
    # resolved, and get rid of this delegation wrapper

    def __init__(self, name='DispatchingConfig'):
        super(DispatchingConfig, self).__init__(name=name)
        # Process-wide config stack; consulted only when no thread-local
        # config is registered (see _current_obj).  Stored via __dict__ to
        # bypass the proxy's attribute delegation.
        self.__dict__['_process_configs'] = []

    def push_thread_config(self, conf):
        """
        Make ``conf`` the active configuration for this thread.
        Thread-local configuration always overrides process-wide
        configuration.

        This should be used like::

            conf = make_conf()
            dispatching_config.push_thread_config(conf)
            try:
                ... do stuff ...
            finally:
                dispatching_config.pop_thread_config(conf)
        """
        self._push_object(conf)

    def pop_thread_config(self, conf=None):
        """
        Remove a thread-local configuration. If ``conf`` is given,
        it is checked against the popped configuration and an error
        is emitted if they don't match.
        """
        self._pop_object(conf)

    def push_process_config(self, conf):
        """
        Like push_thread_config, but applies the configuration to
        the entire process.
        """
        self._process_configs.append(conf)

    def pop_process_config(self, conf=None):
        """Remove the newest process-wide configuration, verifying it
        against ``conf`` when supplied."""
        self._pop_from(self._process_configs, conf)

    def _pop_from(self, lst, conf):
        # Pop-with-verification helper for the process-wide stack.
        popped = lst.pop()
        if conf is not None and popped is not conf:
            raise AssertionError(
                "The config popped (%s) is not the same as the config "
                "expected (%s)"
                % (popped, conf))

    def _current_obj(self):
        # Resolution order: thread-local config (the proxy superclass)
        # first, then the newest process-wide config, else an error.
        try:
            return super(DispatchingConfig, self)._current_obj()
        except TypeError:
            if self._process_configs:
                return self._process_configs[-1]
            raise AttributeError(
                "No configuration has been registered for this process "
                "or thread")
    # Aliases preserved for callers using the older accessor names.
    current = current_conf = _current_obj
+
# Module-global configuration proxy shared by the whole process.
CONFIG = DispatchingConfig()

# Sentinel meaning "no config key was present in the environ".
no_config = object()
class ConfigMiddleware(RegistryManager):
    """
    A WSGI middleware that adds a ``paste.config`` key (by default)
    to the request environment, as well as registering the
    configuration temporarily (for the length of the request) with
    ``paste.config.CONFIG`` (or any other ``DispatchingConfig``
    object).
    """

    def __init__(self, application, config, dispatching_config=CONFIG,
                 environ_key='paste.config'):
        """
        This delegates all requests to `application`, adding a *copy*
        of the configuration `config`.
        """
        def register_config(environ, start_response):
            # Remember any config an outer ConfigMiddleware already placed
            # in the environ so it can be restored when we're done.
            popped_config = environ.get(environ_key, no_config)
            current_config = environ[environ_key] = config.copy()
            environ['paste.registry'].register(dispatching_config,
                                               current_config)

            try:
                app_iter = application(environ, start_response)
            finally:
                if popped_config is no_config:
                    environ.pop(environ_key, None)
                else:
                    environ[environ_key] = popped_config
            return app_iter

        # Bug fix: name the class explicitly.  ``super(self.__class__, ...)``
        # recurses infinitely as soon as this class is subclassed, because
        # self.__class__ is then the subclass in every frame.
        super(ConfigMiddleware, self).__init__(register_config)
+
def make_config_filter(app, global_conf, **local_conf):
    """Paste Deploy entry point: wrap ``app`` in a ConfigMiddleware whose
    configuration is ``global_conf`` overridden by ``local_conf``."""
    merged = global_conf.copy()
    merged.update(local_conf)
    return ConfigMiddleware(app, merged)
+
# Paste Deploy style alias.  Bug fix: this previously bound
# ``ConfigMiddleware.__doc__`` -- a plain string -- to a factory name,
# making any entry point that referenced it uncallable; the alias must be
# the class itself.
make_config_middleware = ConfigMiddleware
diff --git a/paste/cowbell/__init__.py b/paste/cowbell/__init__.py
new file mode 100644
index 0000000..43b7097
--- /dev/null
+++ b/paste/cowbell/__init__.py
@@ -0,0 +1,104 @@
+# Cowbell images: http://commons.wikimedia.org/wiki/Image:Cowbell-1.jpg
+import os
+import re
+from paste.fileapp import FileApp
+from paste.response import header_value, remove_header
+
+SOUND = "http://www.c-eye.net/eyeon/WalkenWAVS/explorestudiospace.wav"
+
class MoreCowbell(object):
    """WSGI middleware that injects a roaming-cowbell easter egg
    (script plus two images) into every text/html response of the
    wrapped application."""
    def __init__(self, app):
        # app: the WSGI application being wrapped.
        self.app = app
    def __call__(self, environ, start_response):
        path_info = environ.get('PATH_INFO', '')
        script_name = environ.get('SCRIPT_NAME', '')
        # Serve the two bell images ourselves from a reserved URL path.
        for filename in ['bell-ascending.png', 'bell-descending.png']:
            if path_info == '/.cowbell/'+ filename:
                app = FileApp(os.path.join(os.path.dirname(__file__), filename))
                return app(environ, start_response)
        type = []
        body = []
        def repl_start_response(status, headers, exc_info=None):
            ct = header_value(headers, 'content-type')
            if ct and ct.startswith('text/html'):
                # HTML response: buffer the body (body.append serves as
                # the write() callable) so it can be rewritten below.
                # Content-Length is dropped since the length changes.
                type.append(ct)
                remove_header(headers, 'content-length')
                start_response(status, headers, exc_info)
                return body.append
            # Non-HTML: pass through untouched.
            return start_response(status, headers, exc_info)
        app_iter = self.app(environ, repl_start_response)
        if type:
            # Got text/html
            body.extend(app_iter)
            body = ''.join(body)
            body = insert_head(body, self.javascript.replace('__SCRIPT_NAME__', script_name))
            body = insert_body(body, self.resources.replace('__SCRIPT_NAME__', script_name))
            return [body]
        else:
            return app_iter

    # Script injected into <head>: cycles the bell images through
    # random positions on a timer.
    javascript = '''\
<script type="text/javascript">
var cowbellState = 'hidden';
var lastCowbellPosition = null;
function showSomewhere() {
    var sec, el;
    if (cowbellState == 'hidden') {
        el = document.getElementById('cowbell-ascending');
        lastCowbellPosition = [parseInt(Math.random()*(window.innerWidth-200)),
                               parseInt(Math.random()*(window.innerHeight-200))];
        el.style.left = lastCowbellPosition[0] + 'px';
        el.style.top = lastCowbellPosition[1] + 'px';
        el.style.display = '';
        cowbellState = 'ascending';
        sec = 1;
    } else if (cowbellState == 'ascending') {
        document.getElementById('cowbell-ascending').style.display = 'none';
        el = document.getElementById('cowbell-descending');
        el.style.left = lastCowbellPosition[0] + 'px';
        el.style.top = lastCowbellPosition[1] + 'px';
        el.style.display = '';
        cowbellState = 'descending';
        sec = 1;
    } else {
        document.getElementById('cowbell-descending').style.display = 'none';
        cowbellState = 'hidden';
        sec = Math.random()*20;
    }
    setTimeout(showSomewhere, sec*1000);
}
setTimeout(showSomewhere, Math.random()*20*1000);
</script>
'''

    # Hidden <div>s injected into <body> that hold the bell images.
    resources = '''\
<div id="cowbell-ascending" style="display: none; position: fixed">
<img src="__SCRIPT_NAME__/.cowbell/bell-ascending.png">
</div>
<div id="cowbell-descending" style="display: none; position: fixed">
<img src="__SCRIPT_NAME__/.cowbell/bell-descending.png">
</div>
'''
+
def insert_head(body, text):
    """Insert *text* into the HTML *body* just before ``</head>``.

    If no closing head tag is present, *text* is prepended instead.
    """
    end_head = re.search(r'</head>', body, re.I)
    if end_head:
        # Bug fix: insert before the tag.  The previous code sliced from
        # end_head.end(), which silently deleted </head> from the page.
        return body[:end_head.start()] + text + body[end_head.start():]
    else:
        return text + body
+
def insert_body(body, text):
    """Insert *text* into the HTML *body* just before ``</body>``.

    If no closing body tag is present, *text* is appended instead.
    """
    end_body = re.search(r'</body>', body, re.I)
    if end_body:
        # Bug fix: insert before the tag.  The previous code sliced from
        # end_body.end(), which silently deleted </body> from the page.
        return body[:end_body.start()] + text + body[end_body.start():]
    else:
        return body + text
+
def make_cowbell(global_conf, app):
    """Paste Deploy entry point: wrap *app* with the MoreCowbell filter."""
    return MoreCowbell(app)
+
+if __name__ == '__main__':
+ from paste.debug.debugapp import SimpleApplication
+ app = MoreCowbell(SimpleApplication())
+ from paste.httpserver import serve
+ serve(app)
diff --git a/paste/cowbell/bell-ascending.png b/paste/cowbell/bell-ascending.png
new file mode 100644
index 0000000..42f33db
--- /dev/null
+++ b/paste/cowbell/bell-ascending.png
Binary files differ
diff --git a/paste/cowbell/bell-descending.png b/paste/cowbell/bell-descending.png
new file mode 100644
index 0000000..dac8012
--- /dev/null
+++ b/paste/cowbell/bell-descending.png
Binary files differ
diff --git a/paste/debug/__init__.py b/paste/debug/__init__.py
new file mode 100644
index 0000000..daef7cc
--- /dev/null
+++ b/paste/debug/__init__.py
@@ -0,0 +1,5 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Package for debugging and development tools
+"""
diff --git a/paste/debug/debugapp.py b/paste/debug/debugapp.py
new file mode 100755
index 0000000..190cbdd
--- /dev/null
+++ b/paste/debug/debugapp.py
@@ -0,0 +1,79 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+Various Applications for Debugging/Testing Purposes
+"""
+
+import time
+__all__ = ['SimpleApplication', 'SlowConsumer']
+
+
class SimpleApplication(object):
    """
    Produces a simple web page
    """
    def __call__(self, environ, start_response):
        # One fixed HTML page, regardless of the request.
        page = "<html><body>simple</body></html>"
        headers = [('Content-Type', 'text/html'),
                   ('Content-Length', str(len(page)))]
        start_response("200 OK", headers)
        return [page]
+
+class SlowConsumer(object):
+ """
+ Consumes an upload slowly...
+
+ NOTE: This should use the iterator form of ``wsgi.input``,
+ but it isn't implemented in paste.httpserver.
+ """
+ def __init__(self, chunk_size = 4096, delay = 1, progress = True):
+ self.chunk_size = chunk_size
+ self.delay = delay
+ self.progress = True
+
+ def __call__(self, environ, start_response):
+ size = 0
+ total = environ.get('CONTENT_LENGTH')
+ if total:
+ remaining = int(total)
+ while remaining > 0:
+ if self.progress:
+ print "%s of %s remaining" % (remaining, total)
+ if remaining > 4096:
+ chunk = environ['wsgi.input'].read(4096)
+ else:
+ chunk = environ['wsgi.input'].read(remaining)
+ if not chunk:
+ break
+ size += len(chunk)
+ remaining -= len(chunk)
+ if self.delay:
+ time.sleep(self.delay)
+ body = "<html><body>%d bytes</body></html>" % size
+ else:
+ body = ('<html><body>\n'
+ '<form method="post" enctype="multipart/form-data">\n'
+ '<input type="file" name="file">\n'
+ '<input type="submit" >\n'
+ '</form></body></html>\n')
+ print "bingles"
+ start_response("200 OK", [('Content-Type', 'text/html'),
+ ('Content-Length', len(body))])
+ return [body]
+
def make_test_app(global_conf):
    """Paste Deploy entry point returning a SimpleApplication."""
    return SimpleApplication()

make_test_app.__doc__ = SimpleApplication.__doc__
+
def make_slow_app(global_conf, chunk_size=4096, delay=1, progress=True):
    """Paste Deploy entry point for SlowConsumer.

    Values arrive as strings from the config file.  `delay` may be
    fractional (e.g. "0.5"), so it is coerced with float(); the
    previous int() would raise on such values and truncate others.
    """
    from paste.deploy.converters import asbool
    return SlowConsumer(
        chunk_size=int(chunk_size),
        delay=float(delay),
        progress=asbool(progress))
diff --git a/paste/debug/doctest_webapp.py b/paste/debug/doctest_webapp.py
new file mode 100755
index 0000000..935b291
--- /dev/null
+++ b/paste/debug/doctest_webapp.py
@@ -0,0 +1,435 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+#!/usr/bin/env python2.4
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+These are functions for use when doctest-testing a document.
+"""
+
+try:
+ import subprocess
+except ImportError:
+ from paste.util import subprocess24 as subprocess
+import doctest
+import os
+import sys
+import shutil
+import re
+import cgi
+import rfc822
+from cStringIO import StringIO
+from paste.util import PySourceColor
+
+
+here = os.path.abspath(__file__)
+paste_parent = os.path.dirname(
+ os.path.dirname(os.path.dirname(here)))
+
def run(command):
    """Run *command* through the shell and print its cleaned-up output."""
    data = run_raw(command)
    if data:
        print data
+
def run_raw(command):
    """
    Runs the string command, returns any output.
    """
    # shell=True: `command` is a full shell command line.  stderr is
    # merged into stdout so callers see one combined transcript; the
    # child runs with the augmented PATH/PYTHONPATH from _make_env().
    proc = subprocess.Popen(command, shell=True,
                            stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE, env=_make_env())
    data = proc.stdout.read()
    proc.wait()
    # Strip every trailing newline/CR, then drop interior blank lines.
    while data.endswith('\n') or data.endswith('\r'):
        data = data[:-1]
    if data:
        data = '\n'.join(
            [l for l in data.splitlines() if l])
        return data
    else:
        return ''
+
+def run_command(command, name, and_print=False):
+ output = run_raw(command)
+ data = '$ %s\n%s' % (command, output)
+ show_file('shell-command', name, description='shell transcript',
+ data=data)
+ if and_print and output:
+ print output
+
+def _make_env():
+ env = os.environ.copy()
+ env['PATH'] = (env.get('PATH', '')
+ + ':'
+ + os.path.join(paste_parent, 'scripts')
+ + ':'
+ + os.path.join(paste_parent, 'paste', '3rd-party',
+ 'sqlobject-files', 'scripts'))
+ env['PYTHONPATH'] = (env.get('PYTHONPATH', '')
+ + ':'
+ + paste_parent)
+ return env
+
+def clear_dir(dir):
+ """
+ Clears (deletes) the given directory
+ """
+ shutil.rmtree(dir, True)
+
+def ls(dir=None, recurse=False, indent=0):
+ """
+ Show a directory listing
+ """
+ dir = dir or os.getcwd()
+ fns = os.listdir(dir)
+ fns.sort()
+ for fn in fns:
+ full = os.path.join(dir, fn)
+ if os.path.isdir(full):
+ fn = fn + '/'
+ print ' '*indent + fn
+ if os.path.isdir(full) and recurse:
+ ls(dir=full, recurse=True, indent=indent+2)
+
+default_app = None
+default_url = None
+
+def set_default_app(app, url):
+ global default_app
+ global default_url
+ default_app = app
+ default_url = url
+
+def resource_filename(fn):
+ """
+ Returns the filename of the resource -- generally in the directory
+ resources/DocumentName/fn
+ """
+ return os.path.join(
+ os.path.dirname(sys.testing_document_filename),
+ 'resources',
+ os.path.splitext(os.path.basename(sys.testing_document_filename))[0],
+ fn)
+
+def show(path_info, example_name):
+ fn = resource_filename(example_name + '.html')
+ out = StringIO()
+ assert default_app is not None, (
+ "No default_app set")
+ url = default_url + path_info
+ out.write('<span class="doctest-url"><a href="%s">%s</a></span><br>\n'
+ % (url, url))
+ out.write('<div class="doctest-example">\n')
+ proc = subprocess.Popen(
+ ['paster', 'serve' '--server=console', '--no-verbose',
+ '--url=' + path_info],
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ env=_make_env())
+ stdout, errors = proc.communicate()
+ stdout = StringIO(stdout)
+ headers = rfc822.Message(stdout)
+ content = stdout.read()
+ for header, value in headers.items():
+ if header.lower() == 'status' and int(value.split()[0]) == 200:
+ continue
+ if header.lower() in ('content-type', 'content-length'):
+ continue
+ if (header.lower() == 'set-cookie'
+ and value.startswith('_SID_')):
+ continue
+ out.write('<span class="doctest-header">%s: %s</span><br>\n'
+ % (header, value))
+ lines = [l for l in content.splitlines() if l.strip()]
+ for line in lines:
+ out.write(line + '\n')
+ if errors:
+ out.write('<pre class="doctest-errors">%s</pre>'
+ % errors)
+ out.write('</div>\n')
+ result = out.getvalue()
+ if not os.path.exists(fn):
+ f = open(fn, 'wb')
+ f.write(result)
+ f.close()
+ else:
+ f = open(fn, 'rb')
+ expected = f.read()
+ f.close()
+ if not html_matches(expected, result):
+ print 'Pages did not match. Expected from %s:' % fn
+ print '-'*60
+ print expected
+ print '='*60
+ print 'Actual output:'
+ print '-'*60
+ print result
+
def html_matches(pattern, text):
    """Loosely match *text* against *pattern*.

    ``...`` in the pattern acts as a wildcard, and hex literals
    (``0x...``, e.g. object addresses in reprs) match anything.
    Returns the match object (truthy) or None.
    """
    rx = re.escape(pattern)
    rx = rx.replace(r'\.\.\.', '.*')
    rx = re.sub(r'0x[0-9a-f]+', '.*', rx)
    return re.search('^%s$' % rx, text)
+
def convert_docstring_string(data):
    """Normalize a file body embedded in a docstring.

    Strips a single leading newline, turns lines consisting of a lone
    ``.`` into blank lines (the trick used to represent blank lines
    inside indented docstrings), and guarantees a trailing newline.
    """
    if data.startswith('\n'):
        data = data[1:]
    converted = ['' if line.rstrip() == '.' else line
                 for line in data.splitlines()]
    return '\n'.join(converted) + '\n'
+
+def create_file(path, version, data):
+ data = convert_docstring_string(data)
+ write_data(path, data)
+ show_file(path, version)
+
+def append_to_file(path, version, data):
+ data = convert_docstring_string(data)
+ f = open(path, 'a')
+ f.write(data)
+ f.close()
+ # I think these appends can happen so quickly (in less than a second)
+ # that the .pyc file doesn't appear to be expired, even though it
+ # is after we've made this change; so we have to get rid of the .pyc
+ # file:
+ if path.endswith('.py'):
+ pyc_file = path + 'c'
+ if os.path.exists(pyc_file):
+ os.unlink(pyc_file)
+ show_file(path, version, description='added to %s' % path,
+ data=data)
+
+def show_file(path, version, description=None, data=None):
+ ext = os.path.splitext(path)[1]
+ if data is None:
+ f = open(path, 'rb')
+ data = f.read()
+ f.close()
+ if ext == '.py':
+ html = ('<div class="source-code">%s</div>'
+ % PySourceColor.str2html(data, PySourceColor.dark))
+ else:
+ html = '<pre class="source-code">%s</pre>' % cgi.escape(data, 1)
+ html = '<span class="source-filename">%s</span><br>%s' % (
+ description or path, html)
+ write_data(resource_filename('%s.%s.gen.html' % (path, version)),
+ html)
+
+def call_source_highlight(input, format):
+ proc = subprocess.Popen(['source-highlight', '--out-format=html',
+ '--no-doc', '--css=none',
+ '--src-lang=%s' % format], shell=False,
+ stdout=subprocess.PIPE)
+ stdout, stderr = proc.communicate(input)
+ result = stdout
+ proc.wait()
+ return result
+
+
def write_data(path, data):
    """Write *data* to *path* in binary mode, creating parent
    directories as needed."""
    parent = os.path.dirname(os.path.abspath(path))
    if not os.path.exists(parent):
        os.makedirs(parent)
    out = open(path, 'wb')
    try:
        out.write(data)
    finally:
        out.close()
+
+
def change_file(path, changes):
    """Apply line-based edits to the file at *path*, rewriting it in place.

    *changes* is a sequence of ``(change_type, line, text)`` tuples:

    - ``('insert', line, text)``: insert *text* (a complete line) before
      0-based line index *line*.
    - ``('delete', start, end)``: remove lines ``start:end`` -- note the
      third element is reused as the *end* index here, not as text.

    Line indices refer to the file as already modified by earlier
    entries in the sequence.
    """
    f = open(os.path.abspath(path), 'rb')
    lines = f.readlines()
    f.close()
    for change_type, line, text in changes:
        if change_type == 'insert':
            lines[line:line] = [text]
        elif change_type == 'delete':
            lines[line:text] = []
        else:
            assert 0, (
                "Unknown change_type: %r" % change_type)
    f = open(path, 'wb')
    f.write(''.join(lines))
    f.close()
+
+class LongFormDocTestParser(doctest.DocTestParser):
+
+ """
+ This parser recognizes some reST comments as commands, without
+ prompts or expected output, like:
+
+ .. run:
+
+ do_this(...
+ ...)
+ """
+
+ _EXAMPLE_RE = re.compile(r"""
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?: (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .*$\n? # But any other line
+ )*))
+ |
+ (?: # This is for longer commands that are prefixed with a reST
+ # comment like '.. run:' (two colons makes that a directive).
+ # These commands cannot have any output.
+
+ (?:^\.\.[ ]*(?P<run>run):[ ]*\n) # Leading command/command
+ (?:[ ]*\n)? # Blank line following
+ (?P<runsource>
+ (?:(?P<runindent> [ ]+)[^ ].*$)
+ (?:\n [ ]+ .*)*)
+ )
+ |
+ (?: # This is for shell commands
+
+ (?P<shellsource>
+ (?:^(P<shellindent> [ ]*) [$] .*) # Shell line
+ (?:\n [ ]* [>] .*)*) # Continuation
+ \n?
+ # Want consists of any non-blank lines that do not start with $
+ (?P<shellwant> (?:(?![ ]*$)
+ (?![ ]*[$]$)
+ .*$\n?
+ )*))
+ """, re.MULTILINE | re.VERBOSE)
+
+ def _parse_example(self, m, name, lineno):
+ r"""
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+
+ >>> def parseit(s):
+ ... p = LongFormDocTestParser()
+ ... return p._parse_example(p._EXAMPLE_RE.search(s), '<string>', 1)
+ >>> parseit('>>> 1\n1')
+ ('1', {}, '1', None)
+ >>> parseit('>>> (1\n... +1)\n2')
+ ('(1\n+1)', {}, '2', None)
+ >>> parseit('.. run:\n\n test1\n test2\n')
+ ('test1\ntest2', {}, '', None)
+ """
+ # Get the example's indentation level.
+ runner = m.group('run') or ''
+ indent = len(m.group('%sindent' % runner))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('%ssource' % runner).split('\n')
+ if runner:
+ self._check_prefix(source_lines[1:], ' '*indent, name, lineno)
+ else:
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[2:], ' '*indent + '.', name, lineno)
+ if runner:
+ source = '\n'.join([sl[indent:] for sl in source_lines])
+ else:
+ source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+ if runner:
+ want = ''
+ exc_msg = None
+ else:
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ # @@: Erg, this is the only line I need to change...
+ output.append(doctest.Example(
+ source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent') or m.group('runindent')),
+ options=options))
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+
+
+if __name__ == '__main__':
+ if sys.argv[1:] and sys.argv[1] == 'doctest':
+ doctest.testmod()
+ sys.exit()
+ if not paste_parent in sys.path:
+ sys.path.append(paste_parent)
+ for fn in sys.argv[1:]:
+ fn = os.path.abspath(fn)
+ # @@: OK, ick; but this module gets loaded twice
+ sys.testing_document_filename = fn
+ doctest.testfile(
+ fn, module_relative=False,
+ optionflags=doctest.ELLIPSIS|doctest.REPORT_ONLY_FIRST_FAILURE,
+ parser=LongFormDocTestParser())
+ new = os.path.splitext(fn)[0] + '.html'
+ assert new != fn
+ os.system('rst2html.py %s > %s' % (fn, new))
diff --git a/paste/debug/fsdiff.py b/paste/debug/fsdiff.py
new file mode 100644
index 0000000..2849ea8
--- /dev/null
+++ b/paste/debug/fsdiff.py
@@ -0,0 +1,409 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Module to find differences over time in a filesystem
+
+Basically this takes a snapshot of a directory, then sees what changes
+were made. The contents of the files are not checked, so you can
+detect that the content was changed, but not what the old version of
+the file was.
+"""
+
+import os
+from fnmatch import fnmatch
+from datetime import datetime
+from paste.util.UserDict24 import IterableUserDict
+import operator
+import re
+
+__all__ = ['Diff', 'Snapshot', 'File', 'Dir', 'report_expected_diffs',
+ 'show_diff']
+
+class Diff(object):
+
+ """
+ Represents the difference between two snapshots
+ """
+
+ def __init__(self, before, after):
+ self.before = before
+ self.after = after
+ self._calculate()
+
+ def _calculate(self):
+ before = self.before.data
+ after = self.after.data
+ self.deleted = {}
+ self.updated = {}
+ self.created = after.copy()
+ for path, f in before.items():
+ if path not in after:
+ self.deleted[path] = f
+ continue
+ del self.created[path]
+ if f.mtime < after[path].mtime:
+ self.updated[path] = after[path]
+
+ def __str__(self):
+ return self.report()
+
+ def report(self, header=True, dates=False):
+ s = []
+ if header:
+ s.append('Difference in %s from %s to %s:' %
+ (self.before.base_path,
+ self.before.calculated,
+ self.after.calculated))
+ for name, files, show_size in [
+ ('created', self.created, True),
+ ('deleted', self.deleted, True),
+ ('updated', self.updated, True)]:
+ if files:
+ s.append('-- %s: -------------------' % name)
+ files = files.items()
+ files.sort()
+ last = ''
+ for path, f in files:
+ t = ' %s' % _space_prefix(last, path, indent=4,
+ include_sep=False)
+ last = path
+ if show_size and f.size != 'N/A':
+ t += ' (%s bytes)' % f.size
+ if dates:
+ parts = []
+ if self.before.get(path):
+ parts.append(self.before[path].mtime)
+ if self.after.get(path):
+ parts.append(self.after[path].mtime)
+ t += ' (mtime: %s)' % ('->'.join(map(repr, parts)))
+ s.append(t)
+ if len(s) == 1:
+ s.append(' (no changes)')
+ return '\n'.join(s)
+
+class Snapshot(IterableUserDict):
+
+ """
+ Represents a snapshot of a set of files. Has a dictionary-like
+ interface, keyed relative to ``base_path``
+ """
+
+ def __init__(self, base_path, files=None, ignore_wildcards=(),
+ ignore_paths=(), ignore_hidden=True):
+ self.base_path = base_path
+ self.ignore_wildcards = ignore_wildcards
+ self.ignore_hidden = ignore_hidden
+ self.ignore_paths = ignore_paths
+ self.calculated = None
+ self.data = files or {}
+ if files is None:
+ self.find_files()
+
+ ############################################################
+ ## File finding
+ ############################################################
+
+ def find_files(self):
+ """
+ Find all the files under the base path, and put them in
+ ``self.data``
+ """
+ self._find_traverse('', self.data)
+ self.calculated = datetime.now()
+
+ def _ignore_file(self, fn):
+ if fn in self.ignore_paths:
+ return True
+ if self.ignore_hidden and os.path.basename(fn).startswith('.'):
+ return True
+ for pat in self.ignore_wildcards:
+ if fnmatch(fn, pat):
+ return True
+ return False
+
+ def _ignore_file(self, fn):
+ if fn in self.ignore_paths:
+ return True
+ if self.ignore_hidden and os.path.basename(fn).startswith('.'):
+ return True
+ return False
+
+ def _find_traverse(self, path, result):
+ full = os.path.join(self.base_path, path)
+ if os.path.isdir(full):
+ if path:
+ # Don't actually include the base path
+ result[path] = Dir(self.base_path, path)
+ for fn in os.listdir(full):
+ fn = os.path.join(path, fn)
+ if self._ignore_file(fn):
+ continue
+ self._find_traverse(fn, result)
+ else:
+ result[path] = File(self.base_path, path)
+
+ def __repr__(self):
+ return '<%s in %r from %r>' % (
+ self.__class__.__name__, self.base_path,
+ self.calculated or '(no calculation done)')
+
+ def compare_expected(self, expected, comparison=operator.eq,
+ differ=None, not_found=None,
+ include_success=False):
+ """
+ Compares a dictionary of ``path: content`` to the
+ found files. Comparison is done by equality, or the
+ ``comparison(actual_content, expected_content)`` function given.
+
+ Returns dictionary of differences, keyed by path. Each
+ difference is either noted, or the output of
+ ``differ(actual_content, expected_content)`` is given.
+
+ If a file does not exist and ``not_found`` is given, then
+ ``not_found(path)`` is put in.
+ """
+ result = {}
+ for path in expected:
+ orig_path = path
+ path = path.strip('/')
+ if path not in self.data:
+ if not_found:
+ msg = not_found(path)
+ else:
+ msg = 'not found'
+ result[path] = msg
+ continue
+ expected_content = expected[orig_path]
+ file = self.data[path]
+ actual_content = file.bytes
+ if not comparison(actual_content, expected_content):
+ if differ:
+ msg = differ(actual_content, expected_content)
+ else:
+ if len(actual_content) < len(expected_content):
+ msg = 'differ (%i bytes smaller)' % (
+ len(expected_content) - len(actual_content))
+ elif len(actual_content) > len(expected_content):
+ msg = 'differ (%i bytes larger)' % (
+ len(actual_content) - len(expected_content))
+ else:
+ msg = 'diff (same size)'
+ result[path] = msg
+ elif include_success:
+ result[path] = 'same!'
+ return result
+
+ def diff_to_now(self):
+ return Diff(self, self.clone())
+
+ def clone(self):
+ return self.__class__(base_path=self.base_path,
+ ignore_wildcards=self.ignore_wildcards,
+ ignore_paths=self.ignore_paths,
+ ignore_hidden=self.ignore_hidden)
+
+class File(object):
+
+ """
+ Represents a single file found as the result of a command.
+
+ Has attributes:
+
+ ``path``:
+ The path of the file, relative to the ``base_path``
+
+ ``full``:
+ The full path
+
+ ``stat``:
+ The results of ``os.stat``. Also ``mtime`` and ``size``
+ contain the ``.st_mtime`` and ``st_size`` of the stat.
+
+ ``bytes``:
+ The contents of the file.
+
+ You may use the ``in`` operator with these objects (tested against
+ the contents of the file), and the ``.mustcontain()`` method.
+ """
+
+ file = True
+ dir = False
+
+ def __init__(self, base_path, path):
+ self.base_path = base_path
+ self.path = path
+ self.full = os.path.join(base_path, path)
+ self.stat = os.stat(self.full)
+ self.mtime = self.stat.st_mtime
+ self.size = self.stat.st_size
+ self._bytes = None
+
+ def bytes__get(self):
+ if self._bytes is None:
+ f = open(self.full, 'rb')
+ self._bytes = f.read()
+ f.close()
+ return self._bytes
+ bytes = property(bytes__get)
+
+ def __contains__(self, s):
+ return s in self.bytes
+
+ def mustcontain(self, s):
+ __tracebackhide__ = True
+ bytes = self.bytes
+ if s not in bytes:
+ print 'Could not find %r in:' % s
+ print bytes
+ assert s in bytes
+
+ def __repr__(self):
+ return '<%s %s:%s>' % (
+ self.__class__.__name__,
+ self.base_path, self.path)
+
+class Dir(File):
+
+ """
+ Represents a directory created by a command.
+ """
+
+ file = False
+ dir = True
+
+ def __init__(self, base_path, path):
+ self.base_path = base_path
+ self.path = path
+ self.full = os.path.join(base_path, path)
+ self.size = 'N/A'
+ self.mtime = 'N/A'
+
+ def __repr__(self):
+ return '<%s %s:%s>' % (
+ self.__class__.__name__,
+ self.base_path, self.path)
+
+ def bytes__get(self):
+ raise NotImplementedError(
+ "Directory %r doesn't have content" % self)
+
+ bytes = property(bytes__get)
+
+
+def _space_prefix(pref, full, sep=None, indent=None, include_sep=True):
+ """
+ Anything shared by pref and full will be replaced with spaces
+ in full, and full returned.
+
+ Example::
+
+ >>> _space_prefix('/foo/bar', '/foo')
+ ' /bar'
+ """
+ if sep is None:
+ sep = os.path.sep
+ pref = pref.split(sep)
+ full = full.split(sep)
+ padding = []
+ while pref and full and pref[0] == full[0]:
+ if indent is None:
+ padding.append(' ' * (len(full[0]) + len(sep)))
+ else:
+ padding.append(' ' * indent)
+ full.pop(0)
+ pref.pop(0)
+ if padding:
+ if include_sep:
+ return ''.join(padding) + sep + sep.join(full)
+ else:
+ return ''.join(padding) + sep.join(full)
+ else:
+ return sep.join(full)
+
+def report_expected_diffs(diffs, colorize=False):
+ """
+ Takes the output of compare_expected, and returns a string
+ description of the differences.
+ """
+ if not diffs:
+ return 'No differences'
+ diffs = diffs.items()
+ diffs.sort()
+ s = []
+ last = ''
+ for path, desc in diffs:
+ t = _space_prefix(last, path, indent=4, include_sep=False)
+ if colorize:
+ t = color_line(t, 11)
+ last = path
+ if len(desc.splitlines()) > 1:
+ cur_indent = len(re.search(r'^[ ]*', t).group(0))
+ desc = indent(cur_indent+2, desc)
+ if colorize:
+ t += '\n'
+ for line in desc.splitlines():
+ if line.strip().startswith('+'):
+ line = color_line(line, 10)
+ elif line.strip().startswith('-'):
+ line = color_line(line, 9)
+ else:
+ line = color_line(line, 14)
+ t += line+'\n'
+ else:
+ t += '\n' + desc
+ else:
+ t += ' '+desc
+ s.append(t)
+ s.append('Files with differences: %s' % len(diffs))
+ return '\n'.join(s)
+
def color_code(foreground=None, background=None):
    """Return the ANSI escape sequence selecting the given colors.

    0 black / 1 red / 2 green / 3 yellow / 4 blue / 5 magenta /
    6 cyan / 7 white; add 8 for the high-intensity variant.  With no
    arguments, returns the terminal reset sequence.
    """
    if foreground is None and background is None:
        return '\x1b[0m'
    parts = []
    if foreground is None:
        parts.append('[39m')
    elif foreground > 7:
        # High intensity: bold attribute plus the bright color code.
        parts.append('[1m')
        parts.append('[%im' % (22 + foreground))
    else:
        parts.append('[%im' % (30 + foreground))
    parts.append('[49m' if background is None
                 else '[%im' % (40 + background))
    return '\x1b' + '\x1b'.join(parts)
+
def color_line(line, foreground=None, background=None):
    """Colorize *line*, keeping its leading whitespace uncolored and
    resetting the terminal at the end."""
    lead = re.search(r'^(\s*)', line)
    colored = color_code(foreground, background) + line[lead.end():]
    return lead.group(1) + colored + color_code()
+
def indent(indent, text):
    """Prefix every line of *text* with *indent* spaces."""
    pad = ' ' * indent
    return '\n'.join(pad + line for line in text.splitlines())
+
def show_diff(actual_content, expected_content):
    """Describe how *actual_content* differs from *expected_content*,
    comparing stripped, non-blank lines."""
    def significant(text):
        return [ln.strip() for ln in text.splitlines() if ln.strip()]
    actual_lines = significant(actual_content)
    expected_lines = significant(expected_content)
    if len(actual_lines) == len(expected_lines) == 1:
        # Single line each: a compact one-line report reads best.
        return '%r not %r' % (actual_lines[0], expected_lines[0])
    if not actual_lines:
        return 'Empty; should have:\n' + expected_content
    import difflib
    return '\n'.join(difflib.ndiff(actual_lines, expected_lines))
diff --git a/paste/debug/prints.py b/paste/debug/prints.py
new file mode 100644
index 0000000..d30fc8f
--- /dev/null
+++ b/paste/debug/prints.py
@@ -0,0 +1,148 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Middleware that displays everything that is printed inline in
+application pages.
+
+Anything printed during the request will get captured and included on
+the page. It will usually be included as a floating element in the
+top right hand corner of the page. If you want to override this
+you can include a tag in your template where it will be placed::
+
+ <pre id="paste-debug-prints"></pre>
+
+You might want to include ``style="white-space: normal"``, as all the
+whitespace will be quoted, and this allows the text to wrap if
+necessary.
+
+"""
+
+from cStringIO import StringIO
+import re
+import cgi
+from paste.util import threadedprint
+from paste import wsgilib
+from paste import response
+import sys
+
+_threadedprint_installed = False
+
+__all__ = ['PrintDebugMiddleware']
+
+class TeeFile(object):
+
+ def __init__(self, files):
+ self.files = files
+
+ def write(self, v):
+ if isinstance(v, unicode):
+ # WSGI is picky in this case
+ v = str(v)
+ for file in self.files:
+ file.write(v)
+
+class PrintDebugMiddleware(object):
+
+ """
+ This middleware captures all the printed statements, and inlines
+ them in HTML pages, so that you can see all the (debug-intended)
+ print statements in the page itself.
+
+ There are two keys added to the environment to control this:
+ ``environ['paste.printdebug_listeners']`` is a list of functions
+ that will be called everytime something is printed.
+
+ ``environ['paste.remove_printdebug']`` is a function that, if
+ called, will disable printing of output for that request.
+
+ If you have ``replace_stdout=True`` then stdout is replaced, not
+ captured.
+ """
+
+ log_template = (
+ '<pre style="width: 40%%; border: 2px solid #000; white-space: normal; '
+ 'background-color: #ffd; color: #000; float: right;">'
+ '<b style="border-bottom: 1px solid #000">Log messages</b><br>'
+ '%s</pre>')
+
+ def __init__(self, app, global_conf=None, force_content_type=False,
+ print_wsgi_errors=True, replace_stdout=False):
+ # @@: global_conf should be handled separately and only for
+ # the entry point
+ self.app = app
+ self.force_content_type = force_content_type
+ if isinstance(print_wsgi_errors, basestring):
+ from paste.deploy.converters import asbool
+ print_wsgi_errors = asbool(print_wsgi_errors)
+ self.print_wsgi_errors = print_wsgi_errors
+ self.replace_stdout = replace_stdout
+ self._threaded_print_stdout = None
+
+ def __call__(self, environ, start_response):
+ global _threadedprint_installed
+ if environ.get('paste.testing'):
+ # In a testing environment this interception isn't
+ # useful:
+ return self.app(environ, start_response)
+ if (not _threadedprint_installed
+ or self._threaded_print_stdout is not sys.stdout):
+ # @@: Not strictly threadsafe
+ _threadedprint_installed = True
+ threadedprint.install(leave_stdout=not self.replace_stdout)
+ self._threaded_print_stdout = sys.stdout
+ removed = []
+ def remove_printdebug():
+ removed.append(None)
+ environ['paste.remove_printdebug'] = remove_printdebug
+ logged = StringIO()
+ listeners = [logged]
+ environ['paste.printdebug_listeners'] = listeners
+ if self.print_wsgi_errors:
+ listeners.append(environ['wsgi.errors'])
+ replacement_stdout = TeeFile(listeners)
+ threadedprint.register(replacement_stdout)
+ try:
+ status, headers, body = wsgilib.intercept_output(
+ environ, self.app)
+ if status is None:
+ # Some error occurred
+ status = '500 Server Error'
+ headers = [('Content-type', 'text/html')]
+ start_response(status, headers)
+ if not body:
+ body = 'An error occurred'
+ content_type = response.header_value(headers, 'content-type')
+ if (removed or
+ (not self.force_content_type and
+ (not content_type
+ or not content_type.startswith('text/html')))):
+ if replacement_stdout == logged:
+ # Then the prints will be lost, unless...
+ environ['wsgi.errors'].write(logged.getvalue())
+ start_response(status, headers)
+ return [body]
+ response.remove_header(headers, 'content-length')
+ body = self.add_log(body, logged.getvalue())
+ start_response(status, headers)
+ return [body]
+ finally:
+ threadedprint.deregister()
+
+ _body_re = re.compile(r'<body[^>]*>', re.I)
+ _explicit_re = re.compile(r'<pre\s*[^>]*id="paste-debug-prints".*?>',
+ re.I+re.S)
+
+ def add_log(self, html, log):
+ if not log:
+ return html
+ text = cgi.escape(log)
+ text = text.replace('\n', '<br>')
+ text = text.replace(' ', '&nbsp; ')
+ match = self._explicit_re.search(html)
+ if not match:
+ text = self.log_template % text
+ match = self._body_re.search(html)
+ if not match:
+ return text + html
+ else:
+ return html[:match.end()] + text + html[match.end():]
diff --git a/paste/debug/profile.py b/paste/debug/profile.py
new file mode 100644
index 0000000..8e2d40a
--- /dev/null
+++ b/paste/debug/profile.py
@@ -0,0 +1,227 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Middleware that profiles the request and displays profiling
+information at the bottom of each page.
+"""
+
+
+import sys
+import os
+import hotshot
+import hotshot.stats
+import threading
+import cgi
+import time
+from cStringIO import StringIO
+from paste import response
+
+__all__ = ['ProfileMiddleware', 'profile_decorator']
+
+class ProfileMiddleware(object):
+
+ """
+ Middleware that profiles all requests.
+
+ All HTML pages will have profiling information appended to them.
+ The data is isolated to that single request, and does not include
+ data from previous requests.
+
+ This uses the ``hotshot`` module, which affects performance of the
+ application. It also runs in a single-threaded mode, so it is
+ only usable in development environments.
+ """
+
+ style = ('clear: both; background-color: #ff9; color: #000; '
+ 'border: 2px solid #000; padding: 5px;')
+
+ def __init__(self, app, global_conf=None,
+ log_filename='profile.log.tmp',
+ limit=40):
+ self.app = app
+ self.lock = threading.Lock()
+ self.log_filename = log_filename
+ self.limit = limit
+
+ def __call__(self, environ, start_response):
+ catch_response = []
+ body = []
+ def replace_start_response(status, headers, exc_info=None):
+ catch_response.extend([status, headers])
+ start_response(status, headers, exc_info)
+ return body.append
+ def run_app():
+ app_iter = self.app(environ, replace_start_response)
+ try:
+ body.extend(app_iter)
+ finally:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+ self.lock.acquire()
+ try:
+ prof = hotshot.Profile(self.log_filename)
+ prof.addinfo('URL', environ.get('PATH_INFO', ''))
+ try:
+ prof.runcall(run_app)
+ finally:
+ prof.close()
+ body = ''.join(body)
+ headers = catch_response[1]
+ content_type = response.header_value(headers, 'content-type')
+ if content_type is None or not content_type.startswith('text/html'):
+ # We can't add info to non-HTML output
+ return [body]
+ stats = hotshot.stats.load(self.log_filename)
+ stats.strip_dirs()
+ stats.sort_stats('time', 'calls')
+ output = capture_output(stats.print_stats, self.limit)
+ output_callers = capture_output(
+ stats.print_callers, self.limit)
+ body += '<pre style="%s">%s\n%s</pre>' % (
+ self.style, cgi.escape(output), cgi.escape(output_callers))
+ return [body]
+ finally:
+ self.lock.release()
+
+def capture_output(func, *args, **kw):
+ # Not threadsafe! (that's okay when ProfileMiddleware uses it,
+ # though, since it synchronizes itself.)
+ out = StringIO()
+ old_stdout = sys.stdout
+ sys.stdout = out
+ try:
+ func(*args, **kw)
+ finally:
+ sys.stdout = old_stdout
+ return out.getvalue()
+
+def profile_decorator(**options):
+
+ """
+ Profile a single function call.
+
+ Used around a function, like::
+
+ @profile_decorator(options...)
+ def ...
+
+ All calls to the function will be profiled. The options are
+ all keywords, and are:
+
+ log_file:
+ The filename to log to (or ``'stdout'`` or ``'stderr'``).
+ Default: stderr.
+ display_limit:
+ Only show the top N items, default: 20.
+ sort_stats:
+ A list of string-attributes to sort on. Default
+ ``('time', 'calls')``.
+ strip_dirs:
+ Strip directories/module names from files? Default True.
+ add_info:
+ If given, this info will be added to the report (for your
+ own tracking). Default: none.
+ log_filename:
+ The temporary filename to log profiling data to. Default;
+ ``./profile_data.log.tmp``
+ no_profile:
+ If true, then don't actually profile anything. Useful for
+ conditional profiling.
+ """
+
+ if options.get('no_profile'):
+ def decorator(func):
+ return func
+ return decorator
+ def decorator(func):
+ def replacement(*args, **kw):
+ return DecoratedProfile(func, **options)(*args, **kw)
+ return replacement
+ return decorator
+
+class DecoratedProfile(object):
+
+ lock = threading.Lock()
+
+ def __init__(self, func, **options):
+ self.func = func
+ self.options = options
+
+ def __call__(self, *args, **kw):
+ self.lock.acquire()
+ try:
+ return self.profile(self.func, *args, **kw)
+ finally:
+ self.lock.release()
+
+ def profile(self, func, *args, **kw):
+ ops = self.options
+ prof_filename = ops.get('log_filename', 'profile_data.log.tmp')
+ prof = hotshot.Profile(prof_filename)
+ prof.addinfo('Function Call',
+ self.format_function(func, *args, **kw))
+ if ops.get('add_info'):
+ prof.addinfo('Extra info', ops['add_info'])
+ exc_info = None
+ try:
+ start_time = time.time()
+ try:
+ result = prof.runcall(func, *args, **kw)
+ except:
+ exc_info = sys.exc_info()
+ end_time = time.time()
+ finally:
+ prof.close()
+ stats = hotshot.stats.load(prof_filename)
+ os.unlink(prof_filename)
+ if ops.get('strip_dirs', True):
+ stats.strip_dirs()
+ stats.sort_stats(*ops.get('sort_stats', ('time', 'calls')))
+ display_limit = ops.get('display_limit', 20)
+ output = capture_output(stats.print_stats, display_limit)
+ output_callers = capture_output(
+ stats.print_callers, display_limit)
+ output_file = ops.get('log_file')
+ if output_file in (None, 'stderr'):
+ f = sys.stderr
+ elif output_file in ('-', 'stdout'):
+ f = sys.stdout
+ else:
+ f = open(output_file, 'a')
+ f.write('\n%s\n' % ('-'*60))
+ f.write('Date: %s\n' % time.strftime('%c'))
+ f.write('Function call: %s\n'
+ % self.format_function(func, *args, **kw))
+ f.write('Wall time: %0.2f seconds\n'
+ % (end_time - start_time))
+ f.write(output)
+ f.write(output_callers)
+ if output_file not in (None, '-', 'stdout', 'stderr'):
+ f.close()
+ if exc_info:
+ # We captured an exception earlier, now we re-raise it
+ raise exc_info[0], exc_info[1], exc_info[2]
+ return result
+
+ def format_function(self, func, *args, **kw):
+ args = map(repr, args)
+ args.extend(
+ ['%s=%r' % (k, v) for k, v in kw.items()])
+ return '%s(%s)' % (func.__name__, ', '.join(args))
+
+
+def make_profile_middleware(
+ app, global_conf,
+ log_filename='profile.log.tmp',
+ limit=40):
+ """
+ Wrap the application in a component that will profile each
+ request. The profiling data is then appended to the output
+ of each page.
+
+ Note that this serializes all requests (i.e., removing
+ concurrency). Therefore never use this in production.
+ """
+ limit = int(limit)
+ return ProfileMiddleware(
+ app, log_filename=log_filename, limit=limit)
diff --git a/paste/debug/testserver.py b/paste/debug/testserver.py
new file mode 100755
index 0000000..26c477a
--- /dev/null
+++ b/paste/debug/testserver.py
@@ -0,0 +1,93 @@
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+WSGI Test Server
+
+This builds upon paste.util.baseserver to customize it for regressions
+where using raw_interactive won't do.
+
+
+"""
+import time
+from paste.httpserver import *
+
+class WSGIRegressionServer(WSGIServer):
+ """
+ A threaded WSGIServer for use in regression testing. To use this
+ module, call serve(application, regression=True), and then call
+ server.accept() to let it handle one request. When finished, use
+ server.stop() to shutdown the server. Note that all pending requests
+ are processed before the server shuts down.
+ """
+ defaulttimeout = 10
+ def __init__ (self, *args, **kwargs):
+ WSGIServer.__init__(self, *args, **kwargs)
+ self.stopping = []
+ self.pending = []
+ self.timeout = self.defaulttimeout
+ # this is a local connection, be quick
+ self.socket.settimeout(2)
+ def serve_forever(self):
+ from threading import Thread
+ thread = Thread(target=self.serve_pending)
+ thread.start()
+ def reset_expires(self):
+ if self.timeout:
+ self.expires = time.time() + self.timeout
+ def close_request(self, *args, **kwargs):
+ WSGIServer.close_request(self, *args, **kwargs)
+ self.pending.pop()
+ self.reset_expires()
+ def serve_pending(self):
+ self.reset_expires()
+ while not self.stopping or self.pending:
+ now = time.time()
+ if now > self.expires and self.timeout:
+ # note regression test doesn't handle exceptions in
+ # threads very well; so we just print and exit
+ print "\nWARNING: WSGIRegressionServer timeout exceeded\n"
+ break
+ if self.pending:
+ self.handle_request()
+ time.sleep(.1)
+ def stop(self):
+ """ stop the server (called from tester's thread) """
+ self.stopping.append(True)
+ def accept(self, count = 1):
+ """ accept another request (called from tester's thread) """
+ assert not self.stopping
+ [self.pending.append(True) for x in range(count)]
+
+def serve(application, host=None, port=None, handler=None):
+ server = WSGIRegressionServer(application, host, port, handler)
+ print "serving on %s:%s" % server.server_address
+ server.serve_forever()
+ return server
+
+if __name__ == '__main__':
+ import urllib
+ from paste.wsgilib import dump_environ
+ server = serve(dump_environ)
+ baseuri = ("http://%s:%s" % server.server_address)
+
+ def fetch(path):
+ # tell the server to humor exactly one more request
+ server.accept(1)
+ # not needed; but this is what you do if the server
+ # may not respond in a resonable time period
+ import socket
+ socket.setdefaulttimeout(5)
+ # build a uri, fetch and return
+ return urllib.urlopen(baseuri + path).read()
+
+ assert "PATH_INFO: /foo" in fetch("/foo")
+ assert "PATH_INFO: /womble" in fetch("/womble")
+
+ # ok, let's make one more final request...
+ server.accept(1)
+ # and then schedule a stop()
+ server.stop()
+ # and then... fetch it...
+ urllib.urlopen(baseuri)
diff --git a/paste/debug/watchthreads.py b/paste/debug/watchthreads.py
new file mode 100644
index 0000000..1048213
--- /dev/null
+++ b/paste/debug/watchthreads.py
@@ -0,0 +1,347 @@
+"""
+Watches the key ``paste.httpserver.thread_pool`` to see how many
+threads there are and report on any wedged threads.
+"""
+import sys
+import cgi
+import time
+import traceback
+from cStringIO import StringIO
+from thread import get_ident
+from paste import httpexceptions
+from paste.request import construct_url, parse_formvars
+from paste.util.template import HTMLTemplate, bunch
+
+page_template = HTMLTemplate('''
+<html>
+ <head>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table.environ tr td {
+ border-bottom: #bbb 1px solid;
+ }
+ table.environ tr td.bottom {
+ border-bottom: none;
+ }
+ table.thread {
+ border: 1px solid #000;
+ margin-bottom: 1em;
+ }
+ table.thread tr td {
+ border-bottom: #999 1px solid;
+ padding-right: 1em;
+ }
+ table.thread tr td.bottom {
+ border-bottom: none;
+ }
+ table.thread tr.this_thread td {
+ background-color: #006;
+ color: #fff;
+ }
+ a.button {
+ background-color: #ddd;
+ border: #aaa outset 2px;
+ text-decoration: none;
+ margin-top: 10px;
+ font-size: 80%;
+ color: #000;
+ }
+ a.button:hover {
+ background-color: #eee;
+ border: #bbb outset 2px;
+ }
+ a.button:active {
+ border: #bbb inset 2px;
+ }
+ </style>
+ <title>{{title}}</title>
+ </head>
+ <body>
+ <h1>{{title}}</h1>
+ {{if kill_thread_id}}
+ <div style="background-color: #060; color: #fff;
+ border: 2px solid #000;">
+ Thread {{kill_thread_id}} killed
+ </div>
+ {{endif}}
+ <div>Pool size: {{nworkers}}
+ {{if actual_workers > nworkers}}
+ + {{actual_workers-nworkers}} extra
+ {{endif}}
+ ({{nworkers_used}} used including current request)<br>
+ idle: {{len(track_threads["idle"])}},
+ busy: {{len(track_threads["busy"])}},
+ hung: {{len(track_threads["hung"])}},
+ dying: {{len(track_threads["dying"])}},
+ zombie: {{len(track_threads["zombie"])}}</div>
+
+{{for thread in threads}}
+
+<table class="thread">
+ <tr {{if thread.thread_id == this_thread_id}}class="this_thread"{{endif}}>
+ <td>
+ <b>Thread</b>
+ {{if thread.thread_id == this_thread_id}}
+ (<i>this</i> request)
+ {{endif}}</td>
+ <td>
+ <b>{{thread.thread_id}}
+ {{if allow_kill}}
+ <form action="{{script_name}}/kill" method="POST"
+ style="display: inline">
+ <input type="hidden" name="thread_id" value="{{thread.thread_id}}">
+ <input type="submit" value="kill">
+ </form>
+ {{endif}}
+ </b>
+ </td>
+ </tr>
+ <tr>
+ <td>Time processing request</td>
+ <td>{{thread.time_html|html}}</td>
+ </tr>
+ <tr>
+ <td>URI</td>
+ <td>{{if thread.uri == 'unknown'}}
+ unknown
+ {{else}}<a href="{{thread.uri}}">{{thread.uri_short}}</a>
+ {{endif}}
+ </td>
+ <tr>
+ <td colspan="2" class="bottom">
+ <a href="#" class="button" style="width: 9em; display: block"
+ onclick="
+ var el = document.getElementById('environ-{{thread.thread_id}}');
+ if (el.style.display) {
+ el.style.display = '';
+ this.innerHTML = \'&#9662; Hide environ\';
+ } else {
+ el.style.display = 'none';
+ this.innerHTML = \'&#9656; Show environ\';
+ }
+ return false
+ ">&#9656; Show environ</a>
+
+ <div id="environ-{{thread.thread_id}}" style="display: none">
+ {{if thread.environ:}}
+ <table class="environ">
+ {{for loop, item in looper(sorted(thread.environ.items()))}}
+ {{py:key, value=item}}
+ <tr>
+ <td {{if loop.last}}class="bottom"{{endif}}>{{key}}</td>
+ <td {{if loop.last}}class="bottom"{{endif}}>{{value}}</td>
+ </tr>
+ {{endfor}}
+ </table>
+ {{else}}
+ Thread is in process of starting
+ {{endif}}
+ </div>
+
+ {{if thread.traceback}}
+ <a href="#" class="button" style="width: 9em; display: block"
+ onclick="
+ var el = document.getElementById('traceback-{{thread.thread_id}}');
+ if (el.style.display) {
+ el.style.display = '';
+ this.innerHTML = \'&#9662; Hide traceback\';
+ } else {
+ el.style.display = 'none';
+ this.innerHTML = \'&#9656; Show traceback\';
+ }
+ return false
+ ">&#9656; Show traceback</a>
+
+ <div id="traceback-{{thread.thread_id}}" style="display: none">
+ <pre class="traceback">{{thread.traceback}}</pre>
+ </div>
+ {{endif}}
+
+ </td>
+ </tr>
+</table>
+
+{{endfor}}
+
+ </body>
+</html>
+''', name='watchthreads.page_template')
+
+class WatchThreads(object):
+
+ """
+ Application that watches the threads in ``paste.httpserver``,
+ showing the length each thread has been working on a request.
+
+ If allow_kill is true, then you can kill errant threads through
+ this application.
+
+ This application can expose private information (specifically in
+ the environment, like cookies), so it should be protected.
+ """
+
+ def __init__(self, allow_kill=False):
+ self.allow_kill = allow_kill
+
+ def __call__(self, environ, start_response):
+ if 'paste.httpserver.thread_pool' not in environ:
+ start_response('403 Forbidden', [('Content-type', 'text/plain')])
+ return ['You must use the threaded Paste HTTP server to use this application']
+ if environ.get('PATH_INFO') == '/kill':
+ return self.kill(environ, start_response)
+ else:
+ return self.show(environ, start_response)
+
+ def show(self, environ, start_response):
+ start_response('200 OK', [('Content-type', 'text/html')])
+ form = parse_formvars(environ)
+ if form.get('kill'):
+ kill_thread_id = form['kill']
+ else:
+ kill_thread_id = None
+ thread_pool = environ['paste.httpserver.thread_pool']
+ nworkers = thread_pool.nworkers
+ now = time.time()
+
+
+ workers = thread_pool.worker_tracker.items()
+ workers.sort(key=lambda v: v[1][0])
+ threads = []
+ for thread_id, (time_started, worker_environ) in workers:
+ thread = bunch()
+ threads.append(thread)
+ if worker_environ:
+ thread.uri = construct_url(worker_environ)
+ else:
+ thread.uri = 'unknown'
+ thread.thread_id = thread_id
+ thread.time_html = format_time(now-time_started)
+ thread.uri_short = shorten(thread.uri)
+ thread.environ = worker_environ
+ thread.traceback = traceback_thread(thread_id)
+
+ page = page_template.substitute(
+ title="Thread Pool Worker Tracker",
+ nworkers=nworkers,
+ actual_workers=len(thread_pool.workers),
+ nworkers_used=len(workers),
+ script_name=environ['SCRIPT_NAME'],
+ kill_thread_id=kill_thread_id,
+ allow_kill=self.allow_kill,
+ threads=threads,
+ this_thread_id=get_ident(),
+ track_threads=thread_pool.track_threads())
+
+ return [page]
+
+ def kill(self, environ, start_response):
+ if not self.allow_kill:
+ exc = httpexceptions.HTTPForbidden(
+ 'Killing threads has not been enabled. Shame on you '
+ 'for trying!')
+ return exc(environ, start_response)
+ vars = parse_formvars(environ)
+ thread_id = int(vars['thread_id'])
+ thread_pool = environ['paste.httpserver.thread_pool']
+ if thread_id not in thread_pool.worker_tracker:
+ exc = httpexceptions.PreconditionFailed(
+ 'You tried to kill thread %s, but it is not working on '
+ 'any requests' % thread_id)
+ return exc(environ, start_response)
+ thread_pool.kill_worker(thread_id)
+ script_name = environ['SCRIPT_NAME'] or '/'
+ exc = httpexceptions.HTTPFound(
+ headers=[('Location', script_name+'?kill=%s' % thread_id)])
+ return exc(environ, start_response)
+
+def traceback_thread(thread_id):
+ """
+ Returns a plain-text traceback of the given thread, or None if it
+ can't get a traceback.
+ """
+ if not hasattr(sys, '_current_frames'):
+ # Only 2.5 has support for this, with this special function
+ return None
+ frames = sys._current_frames()
+ if not thread_id in frames:
+ return None
+ frame = frames[thread_id]
+ out = StringIO()
+ traceback.print_stack(frame, file=out)
+ return out.getvalue()
+
+hide_keys = ['paste.httpserver.thread_pool']
+
+def format_environ(environ):
+ if environ is None:
+ return environ_template.substitute(
+ key='---',
+ value='No environment registered for this thread yet')
+ environ_rows = []
+ for key, value in sorted(environ.items()):
+ if key in hide_keys:
+ continue
+ try:
+ if key.upper() != key:
+ value = repr(value)
+ environ_rows.append(
+ environ_template.substitute(
+ key=cgi.escape(str(key)),
+ value=cgi.escape(str(value))))
+ except Exception, e:
+ environ_rows.append(
+ environ_template.substitute(
+ key=cgi.escape(str(key)),
+ value='Error in <code>repr()</code>: %s' % e))
+ return ''.join(environ_rows)
+
+def format_time(time_length):
+ if time_length >= 60*60:
+ # More than an hour
+ time_string = '%i:%02i:%02i' % (int(time_length/60/60),
+ int(time_length/60) % 60,
+ time_length % 60)
+ elif time_length >= 120:
+ time_string = '%i:%02i' % (int(time_length/60),
+ time_length % 60)
+ elif time_length > 60:
+ time_string = '%i sec' % time_length
+ elif time_length > 1:
+ time_string = '%0.1f sec' % time_length
+ else:
+ time_string = '%0.2f sec' % time_length
+ if time_length < 5:
+ return time_string
+ elif time_length < 120:
+ return '<span style="color: #900">%s</span>' % time_string
+ else:
+ return '<span style="background-color: #600; color: #fff">%s</span>' % time_string
+
+def shorten(s):
+ if len(s) > 60:
+ return s[:40]+'...'+s[-10:]
+ else:
+ return s
+
+def make_watch_threads(global_conf, allow_kill=False):
+ from paste.deploy.converters import asbool
+ return WatchThreads(allow_kill=asbool(allow_kill))
+make_watch_threads.__doc__ = WatchThreads.__doc__
+
+def make_bad_app(global_conf, pause=0):
+ pause = int(pause)
+ def bad_app(environ, start_response):
+ import thread
+ if pause:
+ time.sleep(pause)
+ else:
+ count = 0
+ while 1:
+ print "I'm alive %s (%s)" % (count, thread.get_ident())
+ time.sleep(10)
+ count += 1
+ start_response('200 OK', [('content-type', 'text/plain')])
+ return ['OK, paused %s seconds' % pause]
+ return bad_app
diff --git a/paste/debug/wdg_validate.py b/paste/debug/wdg_validate.py
new file mode 100644
index 0000000..d3678fb
--- /dev/null
+++ b/paste/debug/wdg_validate.py
@@ -0,0 +1,121 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Middleware that tests the validity of all generated HTML using the
+`WDG HTML Validator <http://www.htmlhelp.com/tools/validator/>`_
+"""
+
+from cStringIO import StringIO
+try:
+ import subprocess
+except ImportError:
+ from paste.util import subprocess24 as subprocess
+from paste.response import header_value
+import re
+import cgi
+
+__all__ = ['WDGValidateMiddleware']
+
+class WDGValidateMiddleware(object):
+
+ """
+ Middleware that checks HTML and appends messages about the validity of
+ the HTML. Uses: http://www.htmlhelp.com/tools/validator/ -- interacts
+ with the command line client. Use the configuration ``wdg_path`` to
+ override the path (default: looks for ``validate`` in $PATH).
+
+ To install, in your web context's __init__.py::
+
+ def urlparser_wrap(environ, start_response, app):
+ return wdg_validate.WDGValidateMiddleware(app)(
+ environ, start_response)
+
+ Or in your configuration::
+
+ middleware.append('paste.wdg_validate.WDGValidateMiddleware')
+ """
+
+ _end_body_regex = re.compile(r'</body>', re.I)
+
+ def __init__(self, app, global_conf=None, wdg_path='validate'):
+ self.app = app
+ self.wdg_path = wdg_path
+
+ def __call__(self, environ, start_response):
+ output = StringIO()
+ response = []
+
+ def writer_start_response(status, headers, exc_info=None):
+ response.extend((status, headers))
+ start_response(status, headers, exc_info)
+ return output.write
+
+ app_iter = self.app(environ, writer_start_response)
+ try:
+ for s in app_iter:
+ output.write(s)
+ finally:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+ page = output.getvalue()
+ status, headers = response
+ v = header_value(headers, 'content-type') or ''
+ if (not v.startswith('text/html')
+ and not v.startswith('text/xhtml')
+ and not v.startswith('application/xhtml')):
+ # Can't validate
+ # @@: Should validate CSS too... but using what?
+ return [page]
+ ops = []
+ if v.startswith('text/xhtml+xml'):
+ ops.append('--xml')
+ # @@: Should capture encoding too
+ html_errors = self.call_wdg_validate(
+ self.wdg_path, ops, page)
+ if html_errors:
+ page = self.add_error(page, html_errors)[0]
+ headers.remove(
+ ('Content-Length',
+ str(header_value(headers, 'content-length'))))
+ headers.append(('Content-Length', str(len(page))))
+ return [page]
+
+ def call_wdg_validate(self, wdg_path, ops, page):
+ if subprocess is None:
+ raise ValueError(
+ "This middleware requires the subprocess module from "
+ "Python 2.4")
+ proc = subprocess.Popen([wdg_path] + ops,
+ shell=False,
+ close_fds=True,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ stdout = proc.communicate(page)[0]
+ proc.wait()
+ return stdout
+
+ def add_error(self, html_page, html_errors):
+ add_text = ('<pre style="background-color: #ffd; color: #600; '
+ 'border: 1px solid #000;">%s</pre>'
+ % cgi.escape(html_errors))
+ match = self._end_body_regex.search(html_page)
+ if match:
+ return [html_page[:match.start()]
+ + add_text
+ + html_page[match.start():]]
+ else:
+ return [html_page + add_text]
+
+def make_wdg_validate_middleware(
+ app, global_conf, wdg_path='validate'):
+ """
+ Wraps the application in the WDG validator from
+ http://www.htmlhelp.com/tools/validator/
+
+ Validation errors are appended to the text of each page.
+ You can configure this by giving the path to the validate
+ executable (by default picked up from $PATH)
+ """
+ return WDGValidateMiddleware(
+ app, global_conf, wdg_path=wdg_path)
diff --git a/paste/errordocument.py b/paste/errordocument.py
new file mode 100644
index 0000000..e57c162
--- /dev/null
+++ b/paste/errordocument.py
@@ -0,0 +1,383 @@
+# (c) 2005-2006 James Gardner <james@pythonweb.org>
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+Middleware to display error documents for certain status codes
+
+The middleware in this module can be used to intercept responses with
+specified status codes and internally forward the request to an appropriate
+URL where the content can be displayed to the user as an error document.
+"""
+
+import warnings
+import sys
+from urlparse import urlparse
+from paste.recursive import ForwardRequestException, RecursiveMiddleware, RecursionLoop
+from paste.util import converters
+from paste.response import replace_header
+
def forward(app, codes):
    """
    Intercepts a response with a particular status code and returns the
    content from a specified URL instead.

    The arguments are:

    ``app``
        The WSGI application or middleware chain.

    ``codes``
        A dictionary of integer status codes and the URL to be displayed
        if the response uses that code.

    For example, you might want to create a static file to display a
    "File Not Found" message at the URL ``/error404.html`` and then use
    ``forward`` middleware to catch all 404 status codes and display the page
    you created. In this example ``app`` is your existing WSGI
    application::

        from paste.errordocument import forward
        app = forward(app, codes={404:'/error404.html'})

    """
    # Fail fast on bad config rather than at response time.
    for code in codes:
        if not isinstance(code, int):
            raise TypeError('All status codes should be type int. '
                            '%s is not valid' % repr(code))

    def error_codes_mapper(code, message, environ, global_conf, codes):
        # Forward only for registered codes; None leaves the response
        # untouched.  (dict.get replaces the Python-2-only has_key().)
        return codes.get(code)

    return RecursiveMiddleware(
        StatusBasedForward(
            app,
            error_codes_mapper,
            codes=codes,
        )
    )
+
class StatusKeeper(object):
    """WSGI app that replays ``app`` at an internal ``url`` while forcing
    the original error ``status`` and ``headers`` onto the response.

    Used by ``StatusBasedForward``: the error document is fetched
    internally, but the client still sees the original status code.
    """

    def __init__(self, app, status, url, headers):
        self.app = app
        self.status = status    # status line to force, e.g. '404 Not Found'
        self.url = url          # internal path, may carry a query string
        self.headers = headers  # headers from the original error response

    def __call__(self, environ, start_response):
        def keep_status_start_response(status, headers, exc_info=None):
            # Discard the error document's own status; merge its headers
            # into the original ones.  Cookies are additive, any other
            # header replaces a same-named original header.
            for header, value in headers:
                if header.lower() == 'set-cookie':
                    self.headers.append((header, value))
                else:
                    replace_header(self.headers, header, value)
            return start_response(self.status, self.headers, exc_info)
        # Split on the first '?' only, so a query string that itself
        # contains '?' is preserved intact.
        parts = self.url.split('?', 1)
        environ['PATH_INFO'] = parts[0]
        if len(parts) > 1:
            environ['QUERY_STRING'] = parts[1]
        else:
            environ['QUERY_STRING'] = ''
        try:
            return self.app(environ, keep_status_start_response)
        except RecursionLoop as e:
            # Error page recursed into itself; report and fall back to a
            # plain-text body (Python-3-compatible "as" syntax).
            environ['wsgi.errors'].write(
                'Recursion error getting error page: %s\n' % e)
            keep_status_start_response(
                '500 Server Error',
                [('Content-type', 'text/plain')], sys.exc_info())
            return ['Error: %s.  (Error page could not be fetched)'
                    % self.status]
+
+
class StatusBasedForward(object):
    """
    Middleware that lets you test a response against a custom mapper object to
    programmatically determine whether to internally forward to another URL
    and if so, which URL to forward to.

    If you don't need the full power of this middleware you might choose to
    use the simpler ``forward`` middleware instead.

    The arguments are:

    ``app``
        The WSGI application or middleware chain.

    ``mapper``
        A callable that takes a status code as the
        first parameter, a message as the second, and accepts optional
        environ, global_conf and named arguments afterwards. It should return
        a URL to forward to or ``None`` if the code is not to be intercepted.

    ``global_conf``
        Optional default configuration from your config file. If ``debug`` is
        set to ``true`` a message will be written to ``wsgi.errors`` on each
        internal forward stating the URL forwarded to.

    ``**params``
        Optional, any other configuration and extra arguments you wish to
        pass which will in turn be passed back to the custom mapper object.

    Here is an example where a ``404 File Not Found`` status response would be
    redirected to the URL ``/error?code=404&message=File%20Not%20Found``. This
    could be useful for passing the status code and message into another
    application to display an error document:

    .. code-block:: python

        from paste.errordocument import StatusBasedForward
        from paste.recursive import RecursiveMiddleware
        from urllib import urlencode

        def error_mapper(code, message, environ, global_conf, **kw):
            if code in [404, 500]:
                params = urlencode({'message': message, 'code': code})
                url = '/error?%s' % params
                return url
            else:
                return None

        app = RecursiveMiddleware(
            StatusBasedForward(app, mapper=error_mapper),
        )

    """

    def __init__(self, app, mapper, global_conf=None, **params):
        if global_conf is None:
            global_conf = {}
        # @@: global_conf shouldn't really come in here, only in a
        # separate make_status_based_forward function
        if global_conf:
            self.debug = converters.asbool(global_conf.get('debug', False))
        else:
            self.debug = False
        # NOTE(review): self.debug is stored but not consulted anywhere in
        # this class, so the wsgi.errors message promised in the docstring
        # does not appear to be emitted -- confirm against the full file.
        self.application = app
        self.mapper = mapper
        self.global_conf = global_conf
        self.params = params

    def __call__(self, environ, start_response):
        # Holds at most one [new_url, status, headers] triple, filled in
        # by change_response when the mapper requests a forward.
        url = []

        def change_response(status, headers, exc_info=None):
            status_code = status.split(' ')
            try:
                code = int(status_code[0])
            except (ValueError, TypeError):
                raise Exception(
                    'StatusBasedForward middleware '
                    'received an invalid status code %s'
                    % repr(status_code[0])
                )
            message = ' '.join(status_code[1:])
            new_url = self.mapper(
                code,
                message,
                environ,
                self.global_conf,
                **self.params
            )
            if not (new_url is None or isinstance(new_url, str)):
                raise TypeError(
                    'Expected the url to internally '
                    'redirect to in the StatusBasedForward mapper '
                    'to be a string or None, not %r' % new_url)
            if new_url:
                url.append([new_url, status, headers])
                # We have to allow the app to write stuff, even though
                # we'll ignore it: a fresh list's bound append serves as
                # the write() callable.
                return [].append
            else:
                return start_response(status, headers, exc_info)

        app_iter = self.application(environ, change_response)
        if url:
            # The response is being replaced; close the original iterator
            # per the WSGI spec before forwarding.
            if hasattr(app_iter, 'close'):
                app_iter.close()

            def factory(app):
                return StatusKeeper(app, status=url[0][1], url=url[0][0],
                                    headers=url[0][2])
            raise ForwardRequestException(factory=factory)
        else:
            return app_iter
+
def make_errordocument(app, global_conf, **kw):
    """
    Paste Deploy entry point to create an error document wrapper.

    Use like::

        [filter-app:main]
        use = egg:Paste#errordocument
        next = real-app
        500 = /lib/msg/500.html
        404 = /lib/msg/404.html
    """
    # Config keys arrive as strings; convert them to the int status codes
    # forward() expects.  (Local renamed from `map`, which shadowed the
    # builtin.)
    code_map = {}
    for status, redir_loc in kw.items():
        try:
            status = int(status)
        except ValueError:
            raise ValueError('Bad status code: %r' % status)
        code_map[status] = redir_loc
    return forward(app, code_map)
+
# Export list consumed by the "pudge" documentation generator (distinct
# from Python's __all__).
# NOTE(review): 'empty_error' and 'make_empty_error' are listed but do not
# appear to be defined in this module -- confirm before relying on them.
__pudge_all__ = [
    'forward',
    'make_errordocument',
    'empty_error',
    'make_empty_error',
    'StatusBasedForward',
]
+
+
+###############################################################################
+## Deprecated
+###############################################################################
+
def custom_forward(app, mapper, global_conf=None, **kw):
    """
    Deprecated; use StatusBasedForward instead.
    """
    warnings.warn(
        "errordocuments.custom_forward has been deprecated; please "
        "use errordocuments.StatusBasedForward",
        DeprecationWarning, 2)
    if global_conf is None:
        global_conf = {}
    redirect = _StatusBasedRedirect(app, mapper, global_conf, **kw)
    return redirect
+
class _StatusBasedRedirect(object):
    """
    Deprecated; use StatusBasedForward instead.
    """
    def __init__(self, app, mapper, global_conf=None, **kw):

        warnings.warn(
            "errordocuments._StatusBasedRedirect has been deprecated; please "
            "use errordocuments.StatusBasedForward",
            DeprecationWarning, 2)

        if global_conf is None:
            global_conf = {}
        self.application = app
        self.mapper = mapper
        self.global_conf = global_conf
        self.kw = kw
        # Inline page shown when fetching the real error document itself
        # fails.
        self.fallback_template = """
            <html>
            <head>
                <title>Error %(code)s</title>
            </html>
            <body>
                <h1>Error %(code)s</h1>
                <p>%(message)s</p>
                <hr>
                <p>
                    Additionally an error occurred trying to produce an
                    error document. A description of the error was logged
                    to <tt>wsgi.errors</tt>.
                </p>
            </body>
            </html>
            """

    def __call__(self, environ, start_response):
        # url / code_message each hold at most one entry, appended from
        # inside change_response when the mapper requests a redirect.
        url = []
        code_message = []
        try:
            def change_response(status, headers, exc_info=None):
                new_url = None
                parts = status.split(' ')
                try:
                    code = int(parts[0])
                except (ValueError, TypeError):
                    raise Exception(
                        '_StatusBasedRedirect middleware '
                        'received an invalid status code %s'%repr(parts[0])
                    )
                message = ' '.join(parts[1:])
                # NOTE(review): unlike StatusBasedForward, self.kw is passed
                # as a single positional dict here, not expanded as **kw.
                new_url = self.mapper(
                    code,
                    message,
                    environ,
                    self.global_conf,
                    self.kw
                )
                # NOTE(review): the two message fragments below concatenate
                # without a space ("...error_mapperto be...").
                if not (new_url == None or isinstance(new_url, str)):
                    raise TypeError(
                        'Expected the url to internally '
                        'redirect to in the _StatusBasedRedirect error_mapper'
                        'to be a string or None, not %s'%repr(new_url)
                    )
                if new_url:
                    url.append(new_url)
                    code_message.append([code, message])
                # The original response is started unchanged; the redirect
                # happens after the application returns (in the else below).
                return start_response(status, headers, exc_info)
            app_iter = self.application(environ, change_response)
        except:
            # Deliberate catch-all: any failure while intercepting falls
            # back to the minimal inline error page rather than crashing.
            try:
                import sys
                error = str(sys.exc_info()[1])
            except:
                error = ''
            try:
                code, message = code_message[0]
            except:
                code, message = ['', '']
            environ['wsgi.errors'].write(
                'Error occurred in _StatusBasedRedirect '
                'intercepting the response: '+str(error)
            )
            return [self.fallback_template
                    % {'message': message, 'code': code}]
        else:
            if url:
                url_ = url[0]
                new_environ = {}
                # NOTE(review): this condition looks inverted -- as written,
                # every key except QUERY_STRING is dropped from new_environ
                # and replaced by the query parsed from url_, while the old
                # QUERY_STRING value is copied through unchanged.  The intent
                # was presumably to copy all keys and override QUERY_STRING
                # from url_; confirm before relying on this deprecated class.
                for k, v in environ.items():
                    if k != 'QUERY_STRING':
                        new_environ['QUERY_STRING'] = urlparse(url_)[4]
                    else:
                        new_environ[k] = v
                class InvalidForward(Exception):
                    pass
                def eat_start_response(status, headers, exc_info=None):
                    """
                    We don't want start_response to do anything since it
                    has already been called
                    """
                    if status[:3] != '200':
                        raise InvalidForward(
                            "The URL %s to internally forward "
                            "to in order to create an error document did not "
                            "return a '200' status code." % url_
                        )
                # Re-dispatch through paste.recursive with start_response
                # temporarily swallowed, since it has already been called.
                forward = environ['paste.recursive.forward']
                old_start_response = forward.start_response
                forward.start_response = eat_start_response
                try:
                    app_iter = forward(url_, new_environ)
                # NOTE(review): Python-2-only "except X, e" syntax; would
                # need "as e" for Python 3.
                except InvalidForward, e:
                    code, message = code_message[0]
                    environ['wsgi.errors'].write(
                        'Error occurred in '
                        '_StatusBasedRedirect redirecting '
                        'to new URL: '+str(url[0])
                    )
                    return [
                        self.fallback_template%{
                            'message':message,
                            'code':code,
                        }
                    ]
                else:
                    forward.start_response = old_start_response
                    return app_iter
            else:
                return app_iter
diff --git a/paste/evalexception/__init__.py b/paste/evalexception/__init__.py
new file mode 100644
index 0000000..a19cf85
--- /dev/null
+++ b/paste/evalexception/__init__.py
@@ -0,0 +1,7 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+An exception handler for interactive debugging
+"""
+from paste.evalexception.middleware import EvalException
+
diff --git a/paste/evalexception/evalcontext.py b/paste/evalexception/evalcontext.py
new file mode 100644
index 0000000..dca2a97
--- /dev/null
+++ b/paste/evalexception/evalcontext.py
@@ -0,0 +1,68 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+from cStringIO import StringIO
+import traceback
+import threading
+import pdb
+import sys
+
# Serializes exec_expr() calls: sys.stdout is swapped process-wide while
# code runs, so only one evaluation may execute at a time.
exec_lock = threading.Lock()
+
class EvalContext(object):

    """
    Class that represents an interactive interface. It has its own
    namespace. Use eval_context.exec_expr(expr) to run commands; the
    output of those commands is returned, as are print statements.

    This is essentially what doctest does, and is taken directly from
    doctest.
    """

    def __init__(self, namespace, globs):
        self.namespace = namespace  # used as globals for executed code
        self.globs = globs          # used as locals for executed code

    def exec_expr(self, s):
        """Execute ``s`` in this context and return everything it printed,
        including the traceback of any failure."""
        out = StringIO()
        exec_lock.acquire()
        save_stdout = sys.stdout
        try:
            # Route pdb.set_trace() in the executed code to the real
            # stdout rather than our capture buffer.
            debugger = _OutputRedirectingPdb(save_stdout)
            debugger.reset()
            pdb.set_trace = debugger.set_trace
            sys.stdout = out
            try:
                # "single" mode mimics the interactive prompt: bare
                # expressions get their repr printed.
                code = compile(s, '<web>', "single", 0, 1)
                # Tuple form of exec works on both Python 2 and 3
                # (the statement form "exec code in ..." is 2-only).
                exec(code, self.namespace, self.globs)
                debugger.set_continue()
            except KeyboardInterrupt:
                raise
            except:
                # Deliberate catch-all: the traceback becomes part of the
                # captured output shown to the user.
                traceback.print_exc(file=out)
                debugger.set_continue()
        finally:
            sys.stdout = save_stdout
            exec_lock.release()
        return out.getvalue()
+
+# From doctest
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ pdb.Pdb.__init__(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
diff --git a/paste/evalexception/media/MochiKit.packed.js b/paste/evalexception/media/MochiKit.packed.js
new file mode 100644
index 0000000..15027d9
--- /dev/null
+++ b/paste/evalexception/media/MochiKit.packed.js
@@ -0,0 +1,7829 @@
+/***
+
+ MochiKit.MochiKit 1.4.2 : PACKED VERSION
+
+ THIS FILE IS AUTOMATICALLY GENERATED. If creating patches, please
+ diff against the source tree, not this file.
+
+ See <http://mochikit.com/> for documentation, downloads, license, etc.
+
+ (c) 2005 Bob Ippolito. All rights Reserved.
+
+***/
+
+if(typeof (dojo)!="undefined"){
+dojo.provide("MochiKit.Base");
+}
+if(typeof (MochiKit)=="undefined"){
+MochiKit={};
+}
+if(typeof (MochiKit.Base)=="undefined"){
+MochiKit.Base={};
+}
+if(typeof (MochiKit.__export__)=="undefined"){
+MochiKit.__export__=(MochiKit.__compat__||(typeof (JSAN)=="undefined"&&typeof (dojo)=="undefined"));
+}
+MochiKit.Base.VERSION="1.4.2";
+MochiKit.Base.NAME="MochiKit.Base";
+MochiKit.Base.update=function(_1,_2){
+if(_1===null||_1===undefined){
+_1={};
+}
+for(var i=1;i<arguments.length;i++){
+var o=arguments[i];
+if(typeof (o)!="undefined"&&o!==null){
+for(var k in o){
+_1[k]=o[k];
+}
+}
+}
+return _1;
+};
+MochiKit.Base.update(MochiKit.Base,{__repr__:function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+},toString:function(){
+return this.__repr__();
+},camelize:function(_6){
+var _7=_6.split("-");
+var cc=_7[0];
+for(var i=1;i<_7.length;i++){
+cc+=_7[i].charAt(0).toUpperCase()+_7[i].substring(1);
+}
+return cc;
+},counter:function(n){
+if(arguments.length===0){
+n=1;
+}
+return function(){
+return n++;
+};
+},clone:function(_b){
+var me=arguments.callee;
+if(arguments.length==1){
+me.prototype=_b;
+return new me();
+}
+},_deps:function(_d,_e){
+if(!(_d in MochiKit)){
+MochiKit[_d]={};
+}
+if(typeof (dojo)!="undefined"){
+dojo.provide("MochiKit."+_d);
+}
+for(var i=0;i<_e.length;i++){
+if(typeof (dojo)!="undefined"){
+dojo.require("MochiKit."+_e[i]);
+}
+if(typeof (JSAN)!="undefined"){
+JSAN.use("MochiKit."+_e[i],[]);
+}
+if(!(_e[i] in MochiKit)){
+throw "MochiKit."+_d+" depends on MochiKit."+_e[i]+"!";
+}
+}
+},_flattenArray:function(res,lst){
+for(var i=0;i<lst.length;i++){
+var o=lst[i];
+if(o instanceof Array){
+arguments.callee(res,o);
+}else{
+res.push(o);
+}
+}
+return res;
+},flattenArray:function(lst){
+return MochiKit.Base._flattenArray([],lst);
+},flattenArguments:function(lst){
+var res=[];
+var m=MochiKit.Base;
+var _18=m.extend(null,arguments);
+while(_18.length){
+var o=_18.shift();
+if(o&&typeof (o)=="object"&&typeof (o.length)=="number"){
+for(var i=o.length-1;i>=0;i--){
+_18.unshift(o[i]);
+}
+}else{
+res.push(o);
+}
+}
+return res;
+},extend:function(_1b,obj,_1d){
+if(!_1d){
+_1d=0;
+}
+if(obj){
+var l=obj.length;
+if(typeof (l)!="number"){
+if(typeof (MochiKit.Iter)!="undefined"){
+obj=MochiKit.Iter.list(obj);
+l=obj.length;
+}else{
+throw new TypeError("Argument not an array-like and MochiKit.Iter not present");
+}
+}
+if(!_1b){
+_1b=[];
+}
+for(var i=_1d;i<l;i++){
+_1b.push(obj[i]);
+}
+}
+return _1b;
+},updatetree:function(_20,obj){
+if(_20===null||_20===undefined){
+_20={};
+}
+for(var i=1;i<arguments.length;i++){
+var o=arguments[i];
+if(typeof (o)!="undefined"&&o!==null){
+for(var k in o){
+var v=o[k];
+if(typeof (_20[k])=="object"&&typeof (v)=="object"){
+arguments.callee(_20[k],v);
+}else{
+_20[k]=v;
+}
+}
+}
+}
+return _20;
+},setdefault:function(_26,obj){
+if(_26===null||_26===undefined){
+_26={};
+}
+for(var i=1;i<arguments.length;i++){
+var o=arguments[i];
+for(var k in o){
+if(!(k in _26)){
+_26[k]=o[k];
+}
+}
+}
+return _26;
+},keys:function(obj){
+var _2c=[];
+for(var _2d in obj){
+_2c.push(_2d);
+}
+return _2c;
+},values:function(obj){
+var _2f=[];
+for(var _30 in obj){
+_2f.push(obj[_30]);
+}
+return _2f;
+},items:function(obj){
+var _32=[];
+var e;
+for(var _34 in obj){
+var v;
+try{
+v=obj[_34];
+}
+catch(e){
+continue;
+}
+_32.push([_34,v]);
+}
+return _32;
+},_newNamedError:function(_36,_37,_38){
+_38.prototype=new MochiKit.Base.NamedError(_36.NAME+"."+_37);
+_36[_37]=_38;
+},operator:{truth:function(a){
+return !!a;
+},lognot:function(a){
+return !a;
+},identity:function(a){
+return a;
+},not:function(a){
+return ~a;
+},neg:function(a){
+return -a;
+},add:function(a,b){
+return a+b;
+},sub:function(a,b){
+return a-b;
+},div:function(a,b){
+return a/b;
+},mod:function(a,b){
+return a%b;
+},mul:function(a,b){
+return a*b;
+},and:function(a,b){
+return a&b;
+},or:function(a,b){
+return a|b;
+},xor:function(a,b){
+return a^b;
+},lshift:function(a,b){
+return a<<b;
+},rshift:function(a,b){
+return a>>b;
+},zrshift:function(a,b){
+return a>>>b;
+},eq:function(a,b){
+return a==b;
+},ne:function(a,b){
+return a!=b;
+},gt:function(a,b){
+return a>b;
+},ge:function(a,b){
+return a>=b;
+},lt:function(a,b){
+return a<b;
+},le:function(a,b){
+return a<=b;
+},seq:function(a,b){
+return a===b;
+},sne:function(a,b){
+return a!==b;
+},ceq:function(a,b){
+return MochiKit.Base.compare(a,b)===0;
+},cne:function(a,b){
+return MochiKit.Base.compare(a,b)!==0;
+},cgt:function(a,b){
+return MochiKit.Base.compare(a,b)==1;
+},cge:function(a,b){
+return MochiKit.Base.compare(a,b)!=-1;
+},clt:function(a,b){
+return MochiKit.Base.compare(a,b)==-1;
+},cle:function(a,b){
+return MochiKit.Base.compare(a,b)!=1;
+},logand:function(a,b){
+return a&&b;
+},logor:function(a,b){
+return a||b;
+},contains:function(a,b){
+return b in a;
+}},forwardCall:function(_76){
+return function(){
+return this[_76].apply(this,arguments);
+};
+},itemgetter:function(_77){
+return function(arg){
+return arg[_77];
+};
+},typeMatcher:function(){
+var _79={};
+for(var i=0;i<arguments.length;i++){
+var typ=arguments[i];
+_79[typ]=typ;
+}
+return function(){
+for(var i=0;i<arguments.length;i++){
+if(!(typeof (arguments[i]) in _79)){
+return false;
+}
+}
+return true;
+};
+},isNull:function(){
+for(var i=0;i<arguments.length;i++){
+if(arguments[i]!==null){
+return false;
+}
+}
+return true;
+},isUndefinedOrNull:function(){
+for(var i=0;i<arguments.length;i++){
+var o=arguments[i];
+if(!(typeof (o)=="undefined"||o===null)){
+return false;
+}
+}
+return true;
+},isEmpty:function(obj){
+return !MochiKit.Base.isNotEmpty.apply(this,arguments);
+},isNotEmpty:function(obj){
+for(var i=0;i<arguments.length;i++){
+var o=arguments[i];
+if(!(o&&o.length)){
+return false;
+}
+}
+return true;
+},isArrayLike:function(){
+for(var i=0;i<arguments.length;i++){
+var o=arguments[i];
+var typ=typeof (o);
+if((typ!="object"&&!(typ=="function"&&typeof (o.item)=="function"))||o===null||typeof (o.length)!="number"||o.nodeType===3||o.nodeType===4){
+return false;
+}
+}
+return true;
+},isDateLike:function(){
+for(var i=0;i<arguments.length;i++){
+var o=arguments[i];
+if(typeof (o)!="object"||o===null||typeof (o.getTime)!="function"){
+return false;
+}
+}
+return true;
+},xmap:function(fn){
+if(fn===null){
+return MochiKit.Base.extend(null,arguments,1);
+}
+var _8a=[];
+for(var i=1;i<arguments.length;i++){
+_8a.push(fn(arguments[i]));
+}
+return _8a;
+},map:function(fn,lst){
+var m=MochiKit.Base;
+var itr=MochiKit.Iter;
+var _90=m.isArrayLike;
+if(arguments.length<=2){
+if(!_90(lst)){
+if(itr){
+lst=itr.list(lst);
+if(fn===null){
+return lst;
+}
+}else{
+throw new TypeError("Argument not an array-like and MochiKit.Iter not present");
+}
+}
+if(fn===null){
+return m.extend(null,lst);
+}
+var _91=[];
+for(var i=0;i<lst.length;i++){
+_91.push(fn(lst[i]));
+}
+return _91;
+}else{
+if(fn===null){
+fn=Array;
+}
+var _93=null;
+for(i=1;i<arguments.length;i++){
+if(!_90(arguments[i])){
+if(itr){
+return itr.list(itr.imap.apply(null,arguments));
+}else{
+throw new TypeError("Argument not an array-like and MochiKit.Iter not present");
+}
+}
+var l=arguments[i].length;
+if(_93===null||_93>l){
+_93=l;
+}
+}
+_91=[];
+for(i=0;i<_93;i++){
+var _95=[];
+for(var j=1;j<arguments.length;j++){
+_95.push(arguments[j][i]);
+}
+_91.push(fn.apply(this,_95));
+}
+return _91;
+}
+},xfilter:function(fn){
+var _98=[];
+if(fn===null){
+fn=MochiKit.Base.operator.truth;
+}
+for(var i=1;i<arguments.length;i++){
+var o=arguments[i];
+if(fn(o)){
+_98.push(o);
+}
+}
+return _98;
+},filter:function(fn,lst,_9d){
+var _9e=[];
+var m=MochiKit.Base;
+if(!m.isArrayLike(lst)){
+if(MochiKit.Iter){
+lst=MochiKit.Iter.list(lst);
+}else{
+throw new TypeError("Argument not an array-like and MochiKit.Iter not present");
+}
+}
+if(fn===null){
+fn=m.operator.truth;
+}
+if(typeof (Array.prototype.filter)=="function"){
+return Array.prototype.filter.call(lst,fn,_9d);
+}else{
+if(typeof (_9d)=="undefined"||_9d===null){
+for(var i=0;i<lst.length;i++){
+var o=lst[i];
+if(fn(o)){
+_9e.push(o);
+}
+}
+}else{
+for(i=0;i<lst.length;i++){
+o=lst[i];
+if(fn.call(_9d,o)){
+_9e.push(o);
+}
+}
+}
+}
+return _9e;
+},_wrapDumbFunction:function(_a2){
+return function(){
+switch(arguments.length){
+case 0:
+return _a2();
+case 1:
+return _a2(arguments[0]);
+case 2:
+return _a2(arguments[0],arguments[1]);
+case 3:
+return _a2(arguments[0],arguments[1],arguments[2]);
+}
+var _a3=[];
+for(var i=0;i<arguments.length;i++){
+_a3.push("arguments["+i+"]");
+}
+return eval("(func("+_a3.join(",")+"))");
+};
+},methodcaller:function(_a5){
+var _a6=MochiKit.Base.extend(null,arguments,1);
+if(typeof (_a5)=="function"){
+return function(obj){
+return _a5.apply(obj,_a6);
+};
+}else{
+return function(obj){
+return obj[_a5].apply(obj,_a6);
+};
+}
+},method:function(_a9,_aa){
+var m=MochiKit.Base;
+return m.bind.apply(this,m.extend([_aa,_a9],arguments,2));
+},compose:function(f1,f2){
+var _ae=[];
+var m=MochiKit.Base;
+if(arguments.length===0){
+throw new TypeError("compose() requires at least one argument");
+}
+for(var i=0;i<arguments.length;i++){
+var fn=arguments[i];
+if(typeof (fn)!="function"){
+throw new TypeError(m.repr(fn)+" is not a function");
+}
+_ae.push(fn);
+}
+return function(){
+var _b2=arguments;
+for(var i=_ae.length-1;i>=0;i--){
+_b2=[_ae[i].apply(this,_b2)];
+}
+return _b2[0];
+};
+},bind:function(_b4,_b5){
+if(typeof (_b4)=="string"){
+_b4=_b5[_b4];
+}
+var _b6=_b4.im_func;
+var _b7=_b4.im_preargs;
+var _b8=_b4.im_self;
+var m=MochiKit.Base;
+if(typeof (_b4)=="function"&&typeof (_b4.apply)=="undefined"){
+_b4=m._wrapDumbFunction(_b4);
+}
+if(typeof (_b6)!="function"){
+_b6=_b4;
+}
+if(typeof (_b5)!="undefined"){
+_b8=_b5;
+}
+if(typeof (_b7)=="undefined"){
+_b7=[];
+}else{
+_b7=_b7.slice();
+}
+m.extend(_b7,arguments,2);
+var _ba=function(){
+var _bb=arguments;
+var me=arguments.callee;
+if(me.im_preargs.length>0){
+_bb=m.concat(me.im_preargs,_bb);
+}
+var _bd=me.im_self;
+if(!_bd){
+_bd=this;
+}
+return me.im_func.apply(_bd,_bb);
+};
+_ba.im_self=_b8;
+_ba.im_func=_b6;
+_ba.im_preargs=_b7;
+return _ba;
+},bindLate:function(_be,_bf){
+var m=MochiKit.Base;
+if(typeof (_be)!="string"){
+return m.bind.apply(this,arguments);
+}
+var _c1=m.extend([],arguments,2);
+var _c2=function(){
+var _c3=arguments;
+var me=arguments.callee;
+if(me.im_preargs.length>0){
+_c3=m.concat(me.im_preargs,_c3);
+}
+var _c5=me.im_self;
+if(!_c5){
+_c5=this;
+}
+return _c5[me.im_func].apply(_c5,_c3);
+};
+_c2.im_self=_bf;
+_c2.im_func=_be;
+_c2.im_preargs=_c1;
+return _c2;
+},bindMethods:function(_c6){
+var _c7=MochiKit.Base.bind;
+for(var k in _c6){
+var _c9=_c6[k];
+if(typeof (_c9)=="function"){
+_c6[k]=_c7(_c9,_c6);
+}
+}
+},registerComparator:function(_ca,_cb,_cc,_cd){
+MochiKit.Base.comparatorRegistry.register(_ca,_cb,_cc,_cd);
+},_primitives:{"boolean":true,"string":true,"number":true},compare:function(a,b){
+if(a==b){
+return 0;
+}
+var _d0=(typeof (a)=="undefined"||a===null);
+var _d1=(typeof (b)=="undefined"||b===null);
+if(_d0&&_d1){
+return 0;
+}else{
+if(_d0){
+return -1;
+}else{
+if(_d1){
+return 1;
+}
+}
+}
+var m=MochiKit.Base;
+var _d3=m._primitives;
+if(!(typeof (a) in _d3&&typeof (b) in _d3)){
+try{
+return m.comparatorRegistry.match(a,b);
+}
+catch(e){
+if(e!=m.NotFound){
+throw e;
+}
+}
+}
+if(a<b){
+return -1;
+}else{
+if(a>b){
+return 1;
+}
+}
+var _d4=m.repr;
+throw new TypeError(_d4(a)+" and "+_d4(b)+" can not be compared");
+},compareDateLike:function(a,b){
+return MochiKit.Base.compare(a.getTime(),b.getTime());
+},compareArrayLike:function(a,b){
+var _d9=MochiKit.Base.compare;
+var _da=a.length;
+var _db=0;
+if(_da>b.length){
+_db=1;
+_da=b.length;
+}else{
+if(_da<b.length){
+_db=-1;
+}
+}
+for(var i=0;i<_da;i++){
+var cmp=_d9(a[i],b[i]);
+if(cmp){
+return cmp;
+}
+}
+return _db;
+},registerRepr:function(_de,_df,_e0,_e1){
+MochiKit.Base.reprRegistry.register(_de,_df,_e0,_e1);
+},repr:function(o){
+if(typeof (o)=="undefined"){
+return "undefined";
+}else{
+if(o===null){
+return "null";
+}
+}
+try{
+if(typeof (o.__repr__)=="function"){
+return o.__repr__();
+}else{
+if(typeof (o.repr)=="function"&&o.repr!=arguments.callee){
+return o.repr();
+}
+}
+return MochiKit.Base.reprRegistry.match(o);
+}
+catch(e){
+if(typeof (o.NAME)=="string"&&(o.toString==Function.prototype.toString||o.toString==Object.prototype.toString)){
+return o.NAME;
+}
+}
+try{
+var _e3=(o+"");
+}
+catch(e){
+return "["+typeof (o)+"]";
+}
+if(typeof (o)=="function"){
+_e3=_e3.replace(/^\s+/,"").replace(/\s+/g," ");
+_e3=_e3.replace(/,(\S)/,", $1");
+var idx=_e3.indexOf("{");
+if(idx!=-1){
+_e3=_e3.substr(0,idx)+"{...}";
+}
+}
+return _e3;
+},reprArrayLike:function(o){
+var m=MochiKit.Base;
+return "["+m.map(m.repr,o).join(", ")+"]";
+},reprString:function(o){
+return ("\""+o.replace(/(["\\])/g,"\\$1")+"\"").replace(/[\f]/g,"\\f").replace(/[\b]/g,"\\b").replace(/[\n]/g,"\\n").replace(/[\t]/g,"\\t").replace(/[\v]/g,"\\v").replace(/[\r]/g,"\\r");
+},reprNumber:function(o){
+return o+"";
+},registerJSON:function(_e9,_ea,_eb,_ec){
+MochiKit.Base.jsonRegistry.register(_e9,_ea,_eb,_ec);
+},evalJSON:function(){
+return eval("("+MochiKit.Base._filterJSON(arguments[0])+")");
+},_filterJSON:function(s){
+var m=s.match(/^\s*\/\*(.*)\*\/\s*$/);
+if(m){
+return m[1];
+}
+return s;
+},serializeJSON:function(o){
+var _f0=typeof (o);
+if(_f0=="number"||_f0=="boolean"){
+return o+"";
+}else{
+if(o===null){
+return "null";
+}else{
+if(_f0=="string"){
+var res="";
+for(var i=0;i<o.length;i++){
+var c=o.charAt(i);
+if(c=="\""){
+res+="\\\"";
+}else{
+if(c=="\\"){
+res+="\\\\";
+}else{
+if(c=="\b"){
+res+="\\b";
+}else{
+if(c=="\f"){
+res+="\\f";
+}else{
+if(c=="\n"){
+res+="\\n";
+}else{
+if(c=="\r"){
+res+="\\r";
+}else{
+if(c=="\t"){
+res+="\\t";
+}else{
+if(o.charCodeAt(i)<=31){
+var hex=o.charCodeAt(i).toString(16);
+if(hex.length<2){
+hex="0"+hex;
+}
+res+="\\u00"+hex.toUpperCase();
+}else{
+res+=c;
+}
+}
+}
+}
+}
+}
+}
+}
+}
+return "\""+res+"\"";
+}
+}
+}
+var me=arguments.callee;
+var _f6;
+if(typeof (o.__json__)=="function"){
+_f6=o.__json__();
+if(o!==_f6){
+return me(_f6);
+}
+}
+if(typeof (o.json)=="function"){
+_f6=o.json();
+if(o!==_f6){
+return me(_f6);
+}
+}
+if(_f0!="function"&&typeof (o.length)=="number"){
+var res=[];
+for(var i=0;i<o.length;i++){
+var val=me(o[i]);
+if(typeof (val)!="string"){
+continue;
+}
+res.push(val);
+}
+return "["+res.join(", ")+"]";
+}
+var m=MochiKit.Base;
+try{
+_f6=m.jsonRegistry.match(o);
+if(o!==_f6){
+return me(_f6);
+}
+}
+catch(e){
+if(e!=m.NotFound){
+throw e;
+}
+}
+if(_f0=="undefined"){
+throw new TypeError("undefined can not be serialized as JSON");
+}
+if(_f0=="function"){
+return null;
+}
+res=[];
+for(var k in o){
+var _fa;
+if(typeof (k)=="number"){
+_fa="\""+k+"\"";
+}else{
+if(typeof (k)=="string"){
+_fa=me(k);
+}else{
+continue;
+}
+}
+val=me(o[k]);
+if(typeof (val)!="string"){
+continue;
+}
+res.push(_fa+":"+val);
+}
+return "{"+res.join(", ")+"}";
+},objEqual:function(a,b){
+return (MochiKit.Base.compare(a,b)===0);
+},arrayEqual:function(_fd,arr){
+if(_fd.length!=arr.length){
+return false;
+}
+return (MochiKit.Base.compare(_fd,arr)===0);
+},concat:function(){
+var _ff=[];
+var _100=MochiKit.Base.extend;
+for(var i=0;i<arguments.length;i++){
+_100(_ff,arguments[i]);
+}
+return _ff;
+},keyComparator:function(key){
+var m=MochiKit.Base;
+var _104=m.compare;
+if(arguments.length==1){
+return function(a,b){
+return _104(a[key],b[key]);
+};
+}
+var _107=m.extend(null,arguments);
+return function(a,b){
+var rval=0;
+for(var i=0;(rval===0)&&(i<_107.length);i++){
+var key=_107[i];
+rval=_104(a[key],b[key]);
+}
+return rval;
+};
+},reverseKeyComparator:function(key){
+var _10e=MochiKit.Base.keyComparator.apply(this,arguments);
+return function(a,b){
+return _10e(b,a);
+};
+},partial:function(func){
+var m=MochiKit.Base;
+return m.bind.apply(this,m.extend([func,undefined],arguments,1));
+},listMinMax:function(_113,lst){
+if(lst.length===0){
+return null;
+}
+var cur=lst[0];
+var _116=MochiKit.Base.compare;
+for(var i=1;i<lst.length;i++){
+var o=lst[i];
+if(_116(o,cur)==_113){
+cur=o;
+}
+}
+return cur;
+},objMax:function(){
+return MochiKit.Base.listMinMax(1,arguments);
+},objMin:function(){
+return MochiKit.Base.listMinMax(-1,arguments);
+},findIdentical:function(lst,_11a,_11b,end){
+if(typeof (end)=="undefined"||end===null){
+end=lst.length;
+}
+if(typeof (_11b)=="undefined"||_11b===null){
+_11b=0;
+}
+for(var i=_11b;i<end;i++){
+if(lst[i]===_11a){
+return i;
+}
+}
+return -1;
+},mean:function(){
+var sum=0;
+var m=MochiKit.Base;
+var args=m.extend(null,arguments);
+var _121=args.length;
+while(args.length){
+var o=args.shift();
+if(o&&typeof (o)=="object"&&typeof (o.length)=="number"){
+_121+=o.length-1;
+for(var i=o.length-1;i>=0;i--){
+sum+=o[i];
+}
+}else{
+sum+=o;
+}
+}
+if(_121<=0){
+throw new TypeError("mean() requires at least one argument");
+}
+return sum/_121;
+},median:function(){
+var data=MochiKit.Base.flattenArguments(arguments);
+if(data.length===0){
+throw new TypeError("median() requires at least one argument");
+}
+data.sort(compare);
+if(data.length%2==0){
+var _125=data.length/2;
+return (data[_125]+data[_125-1])/2;
+}else{
+return data[(data.length-1)/2];
+}
+},findValue:function(lst,_127,_128,end){
+if(typeof (end)=="undefined"||end===null){
+end=lst.length;
+}
+if(typeof (_128)=="undefined"||_128===null){
+_128=0;
+}
+var cmp=MochiKit.Base.compare;
+for(var i=_128;i<end;i++){
+if(cmp(lst[i],_127)===0){
+return i;
+}
+}
+return -1;
+},nodeWalk:function(node,_12d){
+var _12e=[node];
+var _12f=MochiKit.Base.extend;
+while(_12e.length){
+var res=_12d(_12e.shift());
+if(res){
+_12f(_12e,res);
+}
+}
+},nameFunctions:function(_131){
+var base=_131.NAME;
+if(typeof (base)=="undefined"){
+base="";
+}else{
+base=base+".";
+}
+for(var name in _131){
+var o=_131[name];
+if(typeof (o)=="function"&&typeof (o.NAME)=="undefined"){
+try{
+o.NAME=base+name;
+}
+catch(e){
+}
+}
+}
+},queryString:function(_135,_136){
+if(typeof (MochiKit.DOM)!="undefined"&&arguments.length==1&&(typeof (_135)=="string"||(typeof (_135.nodeType)!="undefined"&&_135.nodeType>0))){
+var kv=MochiKit.DOM.formContents(_135);
+_135=kv[0];
+_136=kv[1];
+}else{
+if(arguments.length==1){
+if(typeof (_135.length)=="number"&&_135.length==2){
+return arguments.callee(_135[0],_135[1]);
+}
+var o=_135;
+_135=[];
+_136=[];
+for(var k in o){
+var v=o[k];
+if(typeof (v)=="function"){
+continue;
+}else{
+if(MochiKit.Base.isArrayLike(v)){
+for(var i=0;i<v.length;i++){
+_135.push(k);
+_136.push(v[i]);
+}
+}else{
+_135.push(k);
+_136.push(v);
+}
+}
+}
+}
+}
+var rval=[];
+var len=Math.min(_135.length,_136.length);
+var _13e=MochiKit.Base.urlEncode;
+for(var i=0;i<len;i++){
+v=_136[i];
+if(typeof (v)!="undefined"&&v!==null){
+rval.push(_13e(_135[i])+"="+_13e(v));
+}
+}
+return rval.join("&");
+},parseQueryString:function(_13f,_140){
+var qstr=(_13f.charAt(0)=="?")?_13f.substring(1):_13f;
+var _142=qstr.replace(/\+/g,"%20").split(/\&amp\;|\&\#38\;|\&#x26;|\&/);
+var o={};
+var _144;
+if(typeof (decodeURIComponent)!="undefined"){
+_144=decodeURIComponent;
+}else{
+_144=unescape;
+}
+if(_140){
+for(var i=0;i<_142.length;i++){
+var pair=_142[i].split("=");
+var name=_144(pair.shift());
+if(!name){
+continue;
+}
+var arr=o[name];
+if(!(arr instanceof Array)){
+arr=[];
+o[name]=arr;
+}
+arr.push(_144(pair.join("=")));
+}
+}else{
+for(i=0;i<_142.length;i++){
+pair=_142[i].split("=");
+var name=pair.shift();
+if(!name){
+continue;
+}
+o[_144(name)]=_144(pair.join("="));
+}
+}
+return o;
+}});
+MochiKit.Base.AdapterRegistry=function(){
+this.pairs=[];
+};
+MochiKit.Base.AdapterRegistry.prototype={register:function(name,_14a,wrap,_14c){
+if(_14c){
+this.pairs.unshift([name,_14a,wrap]);
+}else{
+this.pairs.push([name,_14a,wrap]);
+}
+},match:function(){
+for(var i=0;i<this.pairs.length;i++){
+var pair=this.pairs[i];
+if(pair[1].apply(this,arguments)){
+return pair[2].apply(this,arguments);
+}
+}
+throw MochiKit.Base.NotFound;
+},unregister:function(name){
+for(var i=0;i<this.pairs.length;i++){
+var pair=this.pairs[i];
+if(pair[0]==name){
+this.pairs.splice(i,1);
+return true;
+}
+}
+return false;
+}};
+MochiKit.Base.EXPORT=["flattenArray","noop","camelize","counter","clone","extend","update","updatetree","setdefault","keys","values","items","NamedError","operator","forwardCall","itemgetter","typeMatcher","isCallable","isUndefined","isUndefinedOrNull","isNull","isEmpty","isNotEmpty","isArrayLike","isDateLike","xmap","map","xfilter","filter","methodcaller","compose","bind","bindLate","bindMethods","NotFound","AdapterRegistry","registerComparator","compare","registerRepr","repr","objEqual","arrayEqual","concat","keyComparator","reverseKeyComparator","partial","merge","listMinMax","listMax","listMin","objMax","objMin","nodeWalk","zip","urlEncode","queryString","serializeJSON","registerJSON","evalJSON","parseQueryString","findValue","findIdentical","flattenArguments","method","average","mean","median"];
+MochiKit.Base.EXPORT_OK=["nameFunctions","comparatorRegistry","reprRegistry","jsonRegistry","compareDateLike","compareArrayLike","reprArrayLike","reprString","reprNumber"];
+MochiKit.Base._exportSymbols=function(_152,_153){
+if(!MochiKit.__export__){
+return;
+}
+var all=_153.EXPORT_TAGS[":all"];
+for(var i=0;i<all.length;i++){
+_152[all[i]]=_153[all[i]];
+}
+};
+MochiKit.Base.__new__=function(){
+var m=this;
+m.noop=m.operator.identity;
+m.forward=m.forwardCall;
+m.find=m.findValue;
+if(typeof (encodeURIComponent)!="undefined"){
+m.urlEncode=function(_157){
+return encodeURIComponent(_157).replace(/\'/g,"%27");
+};
+}else{
+m.urlEncode=function(_158){
+return escape(_158).replace(/\+/g,"%2B").replace(/\"/g,"%22").rval.replace(/\'/g,"%27");
+};
+}
+m.NamedError=function(name){
+this.message=name;
+this.name=name;
+};
+m.NamedError.prototype=new Error();
+m.update(m.NamedError.prototype,{repr:function(){
+if(this.message&&this.message!=this.name){
+return this.name+"("+m.repr(this.message)+")";
+}else{
+return this.name+"()";
+}
+},toString:m.forwardCall("repr")});
+m.NotFound=new m.NamedError("MochiKit.Base.NotFound");
+m.listMax=m.partial(m.listMinMax,1);
+m.listMin=m.partial(m.listMinMax,-1);
+m.isCallable=m.typeMatcher("function");
+m.isUndefined=m.typeMatcher("undefined");
+m.merge=m.partial(m.update,null);
+m.zip=m.partial(m.map,null);
+m.average=m.mean;
+m.comparatorRegistry=new m.AdapterRegistry();
+m.registerComparator("dateLike",m.isDateLike,m.compareDateLike);
+m.registerComparator("arrayLike",m.isArrayLike,m.compareArrayLike);
+m.reprRegistry=new m.AdapterRegistry();
+m.registerRepr("arrayLike",m.isArrayLike,m.reprArrayLike);
+m.registerRepr("string",m.typeMatcher("string"),m.reprString);
+m.registerRepr("numbers",m.typeMatcher("number","boolean"),m.reprNumber);
+m.jsonRegistry=new m.AdapterRegistry();
+var all=m.concat(m.EXPORT,m.EXPORT_OK);
+m.EXPORT_TAGS={":common":m.concat(m.EXPORT_OK),":all":all};
+m.nameFunctions(this);
+};
+MochiKit.Base.__new__();
+if(MochiKit.__export__){
+compare=MochiKit.Base.compare;
+compose=MochiKit.Base.compose;
+serializeJSON=MochiKit.Base.serializeJSON;
+mean=MochiKit.Base.mean;
+median=MochiKit.Base.median;
+}
+MochiKit.Base._exportSymbols(this,MochiKit.Base);
+MochiKit.Base._deps("Iter",["Base"]);
+MochiKit.Iter.NAME="MochiKit.Iter";
+MochiKit.Iter.VERSION="1.4.2";
+MochiKit.Base.update(MochiKit.Iter,{__repr__:function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+},toString:function(){
+return this.__repr__();
+},registerIteratorFactory:function(name,_15c,_15d,_15e){
+MochiKit.Iter.iteratorRegistry.register(name,_15c,_15d,_15e);
+},isIterable:function(o){
+return o!=null&&(typeof (o.next)=="function"||typeof (o.iter)=="function");
+},iter:function(_160,_161){
+var self=MochiKit.Iter;
+if(arguments.length==2){
+return self.takewhile(function(a){
+return a!=_161;
+},_160);
+}
+if(typeof (_160.next)=="function"){
+return _160;
+}else{
+if(typeof (_160.iter)=="function"){
+return _160.iter();
+}
+}
+try{
+return self.iteratorRegistry.match(_160);
+}
+catch(e){
+var m=MochiKit.Base;
+if(e==m.NotFound){
+e=new TypeError(typeof (_160)+": "+m.repr(_160)+" is not iterable");
+}
+throw e;
+}
+},count:function(n){
+if(!n){
+n=0;
+}
+var m=MochiKit.Base;
+return {repr:function(){
+return "count("+n+")";
+},toString:m.forwardCall("repr"),next:m.counter(n)};
+},cycle:function(p){
+var self=MochiKit.Iter;
+var m=MochiKit.Base;
+var lst=[];
+var _16b=self.iter(p);
+return {repr:function(){
+return "cycle(...)";
+},toString:m.forwardCall("repr"),next:function(){
+try{
+var rval=_16b.next();
+lst.push(rval);
+return rval;
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+if(lst.length===0){
+this.next=function(){
+throw self.StopIteration;
+};
+}else{
+var i=-1;
+this.next=function(){
+i=(i+1)%lst.length;
+return lst[i];
+};
+}
+return this.next();
+}
+}};
+},repeat:function(elem,n){
+var m=MochiKit.Base;
+if(typeof (n)=="undefined"){
+return {repr:function(){
+return "repeat("+m.repr(elem)+")";
+},toString:m.forwardCall("repr"),next:function(){
+return elem;
+}};
+}
+return {repr:function(){
+return "repeat("+m.repr(elem)+", "+n+")";
+},toString:m.forwardCall("repr"),next:function(){
+if(n<=0){
+throw MochiKit.Iter.StopIteration;
+}
+n-=1;
+return elem;
+}};
+},next:function(_171){
+return _171.next();
+},izip:function(p,q){
+var m=MochiKit.Base;
+var self=MochiKit.Iter;
+var next=self.next;
+var _177=m.map(self.iter,arguments);
+return {repr:function(){
+return "izip(...)";
+},toString:m.forwardCall("repr"),next:function(){
+return m.map(next,_177);
+}};
+},ifilter:function(pred,seq){
+var m=MochiKit.Base;
+seq=MochiKit.Iter.iter(seq);
+if(pred===null){
+pred=m.operator.truth;
+}
+return {repr:function(){
+return "ifilter(...)";
+},toString:m.forwardCall("repr"),next:function(){
+while(true){
+var rval=seq.next();
+if(pred(rval)){
+return rval;
+}
+}
+return undefined;
+}};
+},ifilterfalse:function(pred,seq){
+var m=MochiKit.Base;
+seq=MochiKit.Iter.iter(seq);
+if(pred===null){
+pred=m.operator.truth;
+}
+return {repr:function(){
+return "ifilterfalse(...)";
+},toString:m.forwardCall("repr"),next:function(){
+while(true){
+var rval=seq.next();
+if(!pred(rval)){
+return rval;
+}
+}
+return undefined;
+}};
+},islice:function(seq){
+var self=MochiKit.Iter;
+var m=MochiKit.Base;
+seq=self.iter(seq);
+var _183=0;
+var stop=0;
+var step=1;
+var i=-1;
+if(arguments.length==2){
+stop=arguments[1];
+}else{
+if(arguments.length==3){
+_183=arguments[1];
+stop=arguments[2];
+}else{
+_183=arguments[1];
+stop=arguments[2];
+step=arguments[3];
+}
+}
+return {repr:function(){
+return "islice("+["...",_183,stop,step].join(", ")+")";
+},toString:m.forwardCall("repr"),next:function(){
+var rval;
+while(i<_183){
+rval=seq.next();
+i++;
+}
+if(_183>=stop){
+throw self.StopIteration;
+}
+_183+=step;
+return rval;
+}};
+},imap:function(fun,p,q){
+var m=MochiKit.Base;
+var self=MochiKit.Iter;
+var _18d=m.map(self.iter,m.extend(null,arguments,1));
+var map=m.map;
+var next=self.next;
+return {repr:function(){
+return "imap(...)";
+},toString:m.forwardCall("repr"),next:function(){
+return fun.apply(this,map(next,_18d));
+}};
+},applymap:function(fun,seq,self){
+seq=MochiKit.Iter.iter(seq);
+var m=MochiKit.Base;
+return {repr:function(){
+return "applymap(...)";
+},toString:m.forwardCall("repr"),next:function(){
+return fun.apply(self,seq.next());
+}};
+},chain:function(p,q){
+var self=MochiKit.Iter;
+var m=MochiKit.Base;
+if(arguments.length==1){
+return self.iter(arguments[0]);
+}
+var _198=m.map(self.iter,arguments);
+return {repr:function(){
+return "chain(...)";
+},toString:m.forwardCall("repr"),next:function(){
+while(_198.length>1){
+try{
+var _199=_198[0].next();
+return _199;
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+_198.shift();
+var _199=_198[0].next();
+return _199;
+}
+}
+if(_198.length==1){
+var arg=_198.shift();
+this.next=m.bind("next",arg);
+return this.next();
+}
+throw self.StopIteration;
+}};
+},takewhile:function(pred,seq){
+var self=MochiKit.Iter;
+seq=self.iter(seq);
+return {repr:function(){
+return "takewhile(...)";
+},toString:MochiKit.Base.forwardCall("repr"),next:function(){
+var rval=seq.next();
+if(!pred(rval)){
+this.next=function(){
+throw self.StopIteration;
+};
+this.next();
+}
+return rval;
+}};
+},dropwhile:function(pred,seq){
+seq=MochiKit.Iter.iter(seq);
+var m=MochiKit.Base;
+var bind=m.bind;
+return {"repr":function(){
+return "dropwhile(...)";
+},"toString":m.forwardCall("repr"),"next":function(){
+while(true){
+var rval=seq.next();
+if(!pred(rval)){
+break;
+}
+}
+this.next=bind("next",seq);
+return rval;
+}};
+},_tee:function(_1a4,sync,_1a6){
+sync.pos[_1a4]=-1;
+var m=MochiKit.Base;
+var _1a8=m.listMin;
+return {repr:function(){
+return "tee("+_1a4+", ...)";
+},toString:m.forwardCall("repr"),next:function(){
+var rval;
+var i=sync.pos[_1a4];
+if(i==sync.max){
+rval=_1a6.next();
+sync.deque.push(rval);
+sync.max+=1;
+sync.pos[_1a4]+=1;
+}else{
+rval=sync.deque[i-sync.min];
+sync.pos[_1a4]+=1;
+if(i==sync.min&&_1a8(sync.pos)!=sync.min){
+sync.min+=1;
+sync.deque.shift();
+}
+}
+return rval;
+}};
+},tee:function(_1ab,n){
+var rval=[];
+var sync={"pos":[],"deque":[],"max":-1,"min":-1};
+if(arguments.length==1||typeof (n)=="undefined"||n===null){
+n=2;
+}
+var self=MochiKit.Iter;
+_1ab=self.iter(_1ab);
+var _tee=self._tee;
+for(var i=0;i<n;i++){
+rval.push(_tee(i,sync,_1ab));
+}
+return rval;
+},list:function(_1b2){
+var rval;
+if(_1b2 instanceof Array){
+return _1b2.slice();
+}
+if(typeof (_1b2)=="function"&&!(_1b2 instanceof Function)&&typeof (_1b2.length)=="number"){
+rval=[];
+for(var i=0;i<_1b2.length;i++){
+rval.push(_1b2[i]);
+}
+return rval;
+}
+var self=MochiKit.Iter;
+_1b2=self.iter(_1b2);
+var rval=[];
+var _1b6;
+try{
+while(true){
+_1b6=_1b2.next();
+rval.push(_1b6);
+}
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+return rval;
+}
+return undefined;
+},reduce:function(fn,_1b8,_1b9){
+var i=0;
+var x=_1b9;
+var self=MochiKit.Iter;
+_1b8=self.iter(_1b8);
+if(arguments.length<3){
+try{
+x=_1b8.next();
+}
+catch(e){
+if(e==self.StopIteration){
+e=new TypeError("reduce() of empty sequence with no initial value");
+}
+throw e;
+}
+i++;
+}
+try{
+while(true){
+x=fn(x,_1b8.next());
+}
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+}
+return x;
+},range:function(){
+var _1bd=0;
+var stop=0;
+var step=1;
+if(arguments.length==1){
+stop=arguments[0];
+}else{
+if(arguments.length==2){
+_1bd=arguments[0];
+stop=arguments[1];
+}else{
+if(arguments.length==3){
+_1bd=arguments[0];
+stop=arguments[1];
+step=arguments[2];
+}else{
+throw new TypeError("range() takes 1, 2, or 3 arguments!");
+}
+}
+}
+if(step===0){
+throw new TypeError("range() step must not be 0");
+}
+return {next:function(){
+if((step>0&&_1bd>=stop)||(step<0&&_1bd<=stop)){
+throw MochiKit.Iter.StopIteration;
+}
+var rval=_1bd;
+_1bd+=step;
+return rval;
+},repr:function(){
+return "range("+[_1bd,stop,step].join(", ")+")";
+},toString:MochiKit.Base.forwardCall("repr")};
+},sum:function(_1c1,_1c2){
+if(typeof (_1c2)=="undefined"||_1c2===null){
+_1c2=0;
+}
+var x=_1c2;
+var self=MochiKit.Iter;
+_1c1=self.iter(_1c1);
+try{
+while(true){
+x+=_1c1.next();
+}
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+}
+return x;
+},exhaust:function(_1c5){
+var self=MochiKit.Iter;
+_1c5=self.iter(_1c5);
+try{
+while(true){
+_1c5.next();
+}
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+}
+},forEach:function(_1c7,func,obj){
+var m=MochiKit.Base;
+var self=MochiKit.Iter;
+if(arguments.length>2){
+func=m.bind(func,obj);
+}
+if(m.isArrayLike(_1c7)&&!self.isIterable(_1c7)){
+try{
+for(var i=0;i<_1c7.length;i++){
+func(_1c7[i]);
+}
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+}
+}else{
+self.exhaust(self.imap(func,_1c7));
+}
+},every:function(_1cd,func){
+var self=MochiKit.Iter;
+try{
+self.ifilterfalse(func,_1cd).next();
+return false;
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+return true;
+}
+},sorted:function(_1d0,cmp){
+var rval=MochiKit.Iter.list(_1d0);
+if(arguments.length==1){
+cmp=MochiKit.Base.compare;
+}
+rval.sort(cmp);
+return rval;
+},reversed:function(_1d3){
+var rval=MochiKit.Iter.list(_1d3);
+rval.reverse();
+return rval;
+},some:function(_1d5,func){
+var self=MochiKit.Iter;
+try{
+self.ifilter(func,_1d5).next();
+return true;
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+return false;
+}
+},iextend:function(lst,_1d9){
+var m=MochiKit.Base;
+var self=MochiKit.Iter;
+if(m.isArrayLike(_1d9)&&!self.isIterable(_1d9)){
+for(var i=0;i<_1d9.length;i++){
+lst.push(_1d9[i]);
+}
+}else{
+_1d9=self.iter(_1d9);
+try{
+while(true){
+lst.push(_1d9.next());
+}
+}
+catch(e){
+if(e!=self.StopIteration){
+throw e;
+}
+}
+}
+return lst;
+},groupby:function(_1dd,_1de){
+var m=MochiKit.Base;
+var self=MochiKit.Iter;
+if(arguments.length<2){
+_1de=m.operator.identity;
+}
+_1dd=self.iter(_1dd);
+var pk=undefined;
+var k=undefined;
+var v;
+function fetch(){
+v=_1dd.next();
+k=_1de(v);
+}
+function eat(){
+var ret=v;
+v=undefined;
+return ret;
+}
+var _1e5=true;
+var _1e6=m.compare;
+return {repr:function(){
+return "groupby(...)";
+},next:function(){
+while(_1e6(k,pk)===0){
+fetch();
+if(_1e5){
+_1e5=false;
+break;
+}
+}
+pk=k;
+return [k,{next:function(){
+if(v==undefined){
+fetch();
+}
+if(_1e6(k,pk)!==0){
+throw self.StopIteration;
+}
+return eat();
+}}];
+}};
+},groupby_as_array:function(_1e7,_1e8){
+var m=MochiKit.Base;
+var self=MochiKit.Iter;
+if(arguments.length<2){
+_1e8=m.operator.identity;
+}
+_1e7=self.iter(_1e7);
+var _1eb=[];
+var _1ec=true;
+var _1ed;
+var _1ee=m.compare;
+while(true){
+try{
+var _1ef=_1e7.next();
+var key=_1e8(_1ef);
+}
+catch(e){
+if(e==self.StopIteration){
+break;
+}
+throw e;
+}
+if(_1ec||_1ee(key,_1ed)!==0){
+var _1f1=[];
+_1eb.push([key,_1f1]);
+}
+_1f1.push(_1ef);
+_1ec=false;
+_1ed=key;
+}
+return _1eb;
+},arrayLikeIter:function(_1f2){
+var i=0;
+return {repr:function(){
+return "arrayLikeIter(...)";
+},toString:MochiKit.Base.forwardCall("repr"),next:function(){
+if(i>=_1f2.length){
+throw MochiKit.Iter.StopIteration;
+}
+return _1f2[i++];
+}};
+},hasIterateNext:function(_1f4){
+return (_1f4&&typeof (_1f4.iterateNext)=="function");
+},iterateNextIter:function(_1f5){
+return {repr:function(){
+return "iterateNextIter(...)";
+},toString:MochiKit.Base.forwardCall("repr"),next:function(){
+var rval=_1f5.iterateNext();
+if(rval===null||rval===undefined){
+throw MochiKit.Iter.StopIteration;
+}
+return rval;
+}};
+}});
+MochiKit.Iter.EXPORT_OK=["iteratorRegistry","arrayLikeIter","hasIterateNext","iterateNextIter"];
+MochiKit.Iter.EXPORT=["StopIteration","registerIteratorFactory","iter","count","cycle","repeat","next","izip","ifilter","ifilterfalse","islice","imap","applymap","chain","takewhile","dropwhile","tee","list","reduce","range","sum","exhaust","forEach","every","sorted","reversed","some","iextend","groupby","groupby_as_array"];
+MochiKit.Iter.__new__=function(){
+var m=MochiKit.Base;
+if(typeof (StopIteration)!="undefined"){
+this.StopIteration=StopIteration;
+}else{
+this.StopIteration=new m.NamedError("StopIteration");
+}
+this.iteratorRegistry=new m.AdapterRegistry();
+this.registerIteratorFactory("arrayLike",m.isArrayLike,this.arrayLikeIter);
+this.registerIteratorFactory("iterateNext",this.hasIterateNext,this.iterateNextIter);
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+};
+MochiKit.Iter.__new__();
+if(MochiKit.__export__){
+reduce=MochiKit.Iter.reduce;
+}
+MochiKit.Base._exportSymbols(this,MochiKit.Iter);
+MochiKit.Base._deps("Logging",["Base"]);
+MochiKit.Logging.NAME="MochiKit.Logging";
+MochiKit.Logging.VERSION="1.4.2";
+MochiKit.Logging.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Logging.toString=function(){
+return this.__repr__();
+};
+MochiKit.Logging.EXPORT=["LogLevel","LogMessage","Logger","alertListener","logger","log","logError","logDebug","logFatal","logWarning"];
+MochiKit.Logging.EXPORT_OK=["logLevelAtLeast","isLogMessage","compareLogMessage"];
+MochiKit.Logging.LogMessage=function(num,_1f9,info){
+this.num=num;
+this.level=_1f9;
+this.info=info;
+this.timestamp=new Date();
+};
+MochiKit.Logging.LogMessage.prototype={repr:function(){
+var m=MochiKit.Base;
+return "LogMessage("+m.map(m.repr,[this.num,this.level,this.info]).join(", ")+")";
+},toString:MochiKit.Base.forwardCall("repr")};
+MochiKit.Base.update(MochiKit.Logging,{logLevelAtLeast:function(_1fc){
+var self=MochiKit.Logging;
+if(typeof (_1fc)=="string"){
+_1fc=self.LogLevel[_1fc];
+}
+return function(msg){
+var _1ff=msg.level;
+if(typeof (_1ff)=="string"){
+_1ff=self.LogLevel[_1ff];
+}
+return _1ff>=_1fc;
+};
+},isLogMessage:function(){
+var _200=MochiKit.Logging.LogMessage;
+for(var i=0;i<arguments.length;i++){
+if(!(arguments[i] instanceof _200)){
+return false;
+}
+}
+return true;
+},compareLogMessage:function(a,b){
+return MochiKit.Base.compare([a.level,a.info],[b.level,b.info]);
+},alertListener:function(msg){
+alert("num: "+msg.num+"\nlevel: "+msg.level+"\ninfo: "+msg.info.join(" "));
+}});
+MochiKit.Logging.Logger=function(_205){
+this.counter=0;
+if(typeof (_205)=="undefined"||_205===null){
+_205=-1;
+}
+this.maxSize=_205;
+this._messages=[];
+this.listeners={};
+this.useNativeConsole=false;
+};
+MochiKit.Logging.Logger.prototype={clear:function(){
+this._messages.splice(0,this._messages.length);
+},logToConsole:function(msg){
+if(typeof (window)!="undefined"&&window.console&&window.console.log){
+window.console.log(msg.replace(/%/g,"\uff05"));
+}else{
+if(typeof (opera)!="undefined"&&opera.postError){
+opera.postError(msg);
+}else{
+if(typeof (printfire)=="function"){
+printfire(msg);
+}else{
+if(typeof (Debug)!="undefined"&&Debug.writeln){
+Debug.writeln(msg);
+}else{
+if(typeof (debug)!="undefined"&&debug.trace){
+debug.trace(msg);
+}
+}
+}
+}
+}
+},dispatchListeners:function(msg){
+for(var k in this.listeners){
+var pair=this.listeners[k];
+if(pair.ident!=k||(pair[0]&&!pair[0](msg))){
+continue;
+}
+pair[1](msg);
+}
+},addListener:function(_20a,_20b,_20c){
+if(typeof (_20b)=="string"){
+_20b=MochiKit.Logging.logLevelAtLeast(_20b);
+}
+var _20d=[_20b,_20c];
+_20d.ident=_20a;
+this.listeners[_20a]=_20d;
+},removeListener:function(_20e){
+delete this.listeners[_20e];
+},baseLog:function(_20f,_210){
+if(typeof (_20f)=="number"){
+if(_20f>=MochiKit.Logging.LogLevel.FATAL){
+_20f="FATAL";
+}else{
+if(_20f>=MochiKit.Logging.LogLevel.ERROR){
+_20f="ERROR";
+}else{
+if(_20f>=MochiKit.Logging.LogLevel.WARNING){
+_20f="WARNING";
+}else{
+if(_20f>=MochiKit.Logging.LogLevel.INFO){
+_20f="INFO";
+}else{
+_20f="DEBUG";
+}
+}
+}
+}
+}
+var msg=new MochiKit.Logging.LogMessage(this.counter,_20f,MochiKit.Base.extend(null,arguments,1));
+this._messages.push(msg);
+this.dispatchListeners(msg);
+if(this.useNativeConsole){
+this.logToConsole(msg.level+": "+msg.info.join(" "));
+}
+this.counter+=1;
+while(this.maxSize>=0&&this._messages.length>this.maxSize){
+this._messages.shift();
+}
+},getMessages:function(_212){
+var _213=0;
+if(!(typeof (_212)=="undefined"||_212===null)){
+_213=Math.max(0,this._messages.length-_212);
+}
+return this._messages.slice(_213);
+},getMessageText:function(_214){
+if(typeof (_214)=="undefined"||_214===null){
+_214=30;
+}
+var _215=this.getMessages(_214);
+if(_215.length){
+var lst=map(function(m){
+return "\n ["+m.num+"] "+m.level+": "+m.info.join(" ");
+},_215);
+lst.unshift("LAST "+_215.length+" MESSAGES:");
+return lst.join("");
+}
+return "";
+},debuggingBookmarklet:function(_218){
+if(typeof (MochiKit.LoggingPane)=="undefined"){
+alert(this.getMessageText());
+}else{
+MochiKit.LoggingPane.createLoggingPane(_218||false);
+}
+}};
+MochiKit.Logging.__new__=function(){
+this.LogLevel={ERROR:40,FATAL:50,WARNING:30,INFO:20,DEBUG:10};
+var m=MochiKit.Base;
+m.registerComparator("LogMessage",this.isLogMessage,this.compareLogMessage);
+var _21a=m.partial;
+var _21b=this.Logger;
+var _21c=_21b.prototype.baseLog;
+m.update(this.Logger.prototype,{debug:_21a(_21c,"DEBUG"),log:_21a(_21c,"INFO"),error:_21a(_21c,"ERROR"),fatal:_21a(_21c,"FATAL"),warning:_21a(_21c,"WARNING")});
+var self=this;
+var _21e=function(name){
+return function(){
+self.logger[name].apply(self.logger,arguments);
+};
+};
+this.log=_21e("log");
+this.logError=_21e("error");
+this.logDebug=_21e("debug");
+this.logFatal=_21e("fatal");
+this.logWarning=_21e("warning");
+this.logger=new _21b();
+this.logger.useNativeConsole=true;
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+};
+if(typeof (printfire)=="undefined"&&typeof (document)!="undefined"&&document.createEvent&&typeof (dispatchEvent)!="undefined"){
+printfire=function(){
+printfire.args=arguments;
+var ev=document.createEvent("Events");
+ev.initEvent("printfire",false,true);
+dispatchEvent(ev);
+};
+}
+MochiKit.Logging.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.Logging);
+MochiKit.Base._deps("DateTime",["Base"]);
+MochiKit.DateTime.NAME="MochiKit.DateTime";
+MochiKit.DateTime.VERSION="1.4.2";
+MochiKit.DateTime.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.DateTime.toString=function(){
+return this.__repr__();
+};
+MochiKit.DateTime.isoDate=function(str){
+str=str+"";
+if(typeof (str)!="string"||str.length===0){
+return null;
+}
+var iso=str.split("-");
+if(iso.length===0){
+return null;
+}
+var date=new Date(iso[0],iso[1]-1,iso[2]);
+date.setFullYear(iso[0]);
+date.setMonth(iso[1]-1);
+date.setDate(iso[2]);
+return date;
+};
+MochiKit.DateTime._isoRegexp=/(\d{4,})(?:-(\d{1,2})(?:-(\d{1,2})(?:[T ](\d{1,2}):(\d{1,2})(?::(\d{1,2})(?:\.(\d+))?)?(?:(Z)|([+-])(\d{1,2})(?::(\d{1,2}))?)?)?)?)?/;
+MochiKit.DateTime.isoTimestamp=function(str){
+str=str+"";
+if(typeof (str)!="string"||str.length===0){
+return null;
+}
+var res=str.match(MochiKit.DateTime._isoRegexp);
+if(typeof (res)=="undefined"||res===null){
+return null;
+}
+var year,_227,day,hour,min,sec,msec;
+year=parseInt(res[1],10);
+if(typeof (res[2])=="undefined"||res[2]===""){
+return new Date(year);
+}
+_227=parseInt(res[2],10)-1;
+day=parseInt(res[3],10);
+if(typeof (res[4])=="undefined"||res[4]===""){
+return new Date(year,_227,day);
+}
+hour=parseInt(res[4],10);
+min=parseInt(res[5],10);
+sec=(typeof (res[6])!="undefined"&&res[6]!=="")?parseInt(res[6],10):0;
+if(typeof (res[7])!="undefined"&&res[7]!==""){
+msec=Math.round(1000*parseFloat("0."+res[7]));
+}else{
+msec=0;
+}
+if((typeof (res[8])=="undefined"||res[8]==="")&&(typeof (res[9])=="undefined"||res[9]==="")){
+return new Date(year,_227,day,hour,min,sec,msec);
+}
+var ofs;
+if(typeof (res[9])!="undefined"&&res[9]!==""){
+ofs=parseInt(res[10],10)*3600000;
+if(typeof (res[11])!="undefined"&&res[11]!==""){
+ofs+=parseInt(res[11],10)*60000;
+}
+if(res[9]=="-"){
+ofs=-ofs;
+}
+}else{
+ofs=0;
+}
+return new Date(Date.UTC(year,_227,day,hour,min,sec,msec)-ofs);
+};
+MochiKit.DateTime.toISOTime=function(date,_22f){
+if(typeof (date)=="undefined"||date===null){
+return null;
+}
+var hh=date.getHours();
+var mm=date.getMinutes();
+var ss=date.getSeconds();
+var lst=[((_22f&&(hh<10))?"0"+hh:hh),((mm<10)?"0"+mm:mm),((ss<10)?"0"+ss:ss)];
+return lst.join(":");
+};
+MochiKit.DateTime.toISOTimestamp=function(date,_235){
+if(typeof (date)=="undefined"||date===null){
+return null;
+}
+var sep=_235?"T":" ";
+var foot=_235?"Z":"";
+if(_235){
+date=new Date(date.getTime()+(date.getTimezoneOffset()*60000));
+}
+return MochiKit.DateTime.toISODate(date)+sep+MochiKit.DateTime.toISOTime(date,_235)+foot;
+};
+MochiKit.DateTime.toISODate=function(date){
+if(typeof (date)=="undefined"||date===null){
+return null;
+}
+var _239=MochiKit.DateTime._padTwo;
+var _23a=MochiKit.DateTime._padFour;
+return [_23a(date.getFullYear()),_239(date.getMonth()+1),_239(date.getDate())].join("-");
+};
+MochiKit.DateTime.americanDate=function(d){
+d=d+"";
+if(typeof (d)!="string"||d.length===0){
+return null;
+}
+var a=d.split("/");
+return new Date(a[2],a[0]-1,a[1]);
+};
+MochiKit.DateTime._padTwo=function(n){
+return (n>9)?n:"0"+n;
+};
+MochiKit.DateTime._padFour=function(n){
+switch(n.toString().length){
+case 1:
+return "000"+n;
+break;
+case 2:
+return "00"+n;
+break;
+case 3:
+return "0"+n;
+break;
+case 4:
+default:
+return n;
+}
+};
+MochiKit.DateTime.toPaddedAmericanDate=function(d){
+if(typeof (d)=="undefined"||d===null){
+return null;
+}
+var _240=MochiKit.DateTime._padTwo;
+return [_240(d.getMonth()+1),_240(d.getDate()),d.getFullYear()].join("/");
+};
+MochiKit.DateTime.toAmericanDate=function(d){
+if(typeof (d)=="undefined"||d===null){
+return null;
+}
+return [d.getMonth()+1,d.getDate(),d.getFullYear()].join("/");
+};
+MochiKit.DateTime.EXPORT=["isoDate","isoTimestamp","toISOTime","toISOTimestamp","toISODate","americanDate","toPaddedAmericanDate","toAmericanDate"];
+MochiKit.DateTime.EXPORT_OK=[];
+MochiKit.DateTime.EXPORT_TAGS={":common":MochiKit.DateTime.EXPORT,":all":MochiKit.DateTime.EXPORT};
+MochiKit.DateTime.__new__=function(){
+var base=this.NAME+".";
+for(var k in this){
+var o=this[k];
+if(typeof (o)=="function"&&typeof (o.NAME)=="undefined"){
+try{
+o.NAME=base+k;
+}
+catch(e){
+}
+}
+}
+};
+MochiKit.DateTime.__new__();
+if(typeof (MochiKit.Base)!="undefined"){
+MochiKit.Base._exportSymbols(this,MochiKit.DateTime);
+}else{
+(function(_245,_246){
+if((typeof (JSAN)=="undefined"&&typeof (dojo)=="undefined")||(MochiKit.__export__===false)){
+var all=_246.EXPORT_TAGS[":all"];
+for(var i=0;i<all.length;i++){
+_245[all[i]]=_246[all[i]];
+}
+}
+})(this,MochiKit.DateTime);
+}
+MochiKit.Base._deps("Format",["Base"]);
+MochiKit.Format.NAME="MochiKit.Format";
+MochiKit.Format.VERSION="1.4.2";
+MochiKit.Format.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Format.toString=function(){
+return this.__repr__();
+};
+MochiKit.Format._numberFormatter=function(_249,_24a,_24b,_24c,_24d,_24e,_24f,_250,_251){
+return function(num){
+num=parseFloat(num);
+if(typeof (num)=="undefined"||num===null||isNaN(num)){
+return _249;
+}
+var _253=_24a;
+var _254=_24b;
+if(num<0){
+num=-num;
+}else{
+_253=_253.replace(/-/,"");
+}
+var me=arguments.callee;
+var fmt=MochiKit.Format.formatLocale(_24c);
+if(_24d){
+num=num*100;
+_254=fmt.percent+_254;
+}
+num=MochiKit.Format.roundToFixed(num,_24e);
+var _257=num.split(/\./);
+var _258=_257[0];
+var frac=(_257.length==1)?"":_257[1];
+var res="";
+while(_258.length<_24f){
+_258="0"+_258;
+}
+if(_250){
+while(_258.length>_250){
+var i=_258.length-_250;
+res=fmt.separator+_258.substring(i,_258.length)+res;
+_258=_258.substring(0,i);
+}
+}
+res=_258+res;
+if(_24e>0){
+while(frac.length<_251){
+frac=frac+"0";
+}
+res=res+fmt.decimal+frac;
+}
+return _253+res+_254;
+};
+};
+MochiKit.Format.numberFormatter=function(_25c,_25d,_25e){
+if(typeof (_25d)=="undefined"){
+_25d="";
+}
+var _25f=_25c.match(/((?:[0#]+,)?[0#]+)(?:\.([0#]+))?(%)?/);
+if(!_25f){
+throw TypeError("Invalid pattern");
+}
+var _260=_25c.substr(0,_25f.index);
+var _261=_25c.substr(_25f.index+_25f[0].length);
+if(_260.search(/-/)==-1){
+_260=_260+"-";
+}
+var _262=_25f[1];
+var frac=(typeof (_25f[2])=="string"&&_25f[2]!="")?_25f[2]:"";
+var _264=(typeof (_25f[3])=="string"&&_25f[3]!="");
+var tmp=_262.split(/,/);
+var _266;
+if(typeof (_25e)=="undefined"){
+_25e="default";
+}
+if(tmp.length==1){
+_266=null;
+}else{
+_266=tmp[1].length;
+}
+var _267=_262.length-_262.replace(/0/g,"").length;
+var _268=frac.length-frac.replace(/0/g,"").length;
+var _269=frac.length;
+var rval=MochiKit.Format._numberFormatter(_25d,_260,_261,_25e,_264,_269,_267,_266,_268);
+var m=MochiKit.Base;
+if(m){
+var fn=arguments.callee;
+var args=m.concat(arguments);
+rval.repr=function(){
+return [self.NAME,"(",map(m.repr,args).join(", "),")"].join("");
+};
+}
+return rval;
+};
+MochiKit.Format.formatLocale=function(_26e){
+if(typeof (_26e)=="undefined"||_26e===null){
+_26e="default";
+}
+if(typeof (_26e)=="string"){
+var rval=MochiKit.Format.LOCALE[_26e];
+if(typeof (rval)=="string"){
+rval=arguments.callee(rval);
+MochiKit.Format.LOCALE[_26e]=rval;
+}
+return rval;
+}else{
+return _26e;
+}
+};
+MochiKit.Format.twoDigitAverage=function(_270,_271){
+if(_271){
+var res=_270/_271;
+if(!isNaN(res)){
+return MochiKit.Format.twoDigitFloat(res);
+}
+}
+return "0";
+};
+MochiKit.Format.twoDigitFloat=function(_273){
+var res=roundToFixed(_273,2);
+if(res.indexOf(".00")>0){
+return res.substring(0,res.length-3);
+}else{
+if(res.charAt(res.length-1)=="0"){
+return res.substring(0,res.length-1);
+}else{
+return res;
+}
+}
+};
+MochiKit.Format.lstrip=function(str,_276){
+str=str+"";
+if(typeof (str)!="string"){
+return null;
+}
+if(!_276){
+return str.replace(/^\s+/,"");
+}else{
+return str.replace(new RegExp("^["+_276+"]+"),"");
+}
+};
+MochiKit.Format.rstrip=function(str,_278){
+str=str+"";
+if(typeof (str)!="string"){
+return null;
+}
+if(!_278){
+return str.replace(/\s+$/,"");
+}else{
+return str.replace(new RegExp("["+_278+"]+$"),"");
+}
+};
+MochiKit.Format.strip=function(str,_27a){
+var self=MochiKit.Format;
+return self.rstrip(self.lstrip(str,_27a),_27a);
+};
+MochiKit.Format.truncToFixed=function(_27c,_27d){
+var res=Math.floor(_27c).toFixed(0);
+if(_27c<0){
+res=Math.ceil(_27c).toFixed(0);
+if(res.charAt(0)!="-"&&_27d>0){
+res="-"+res;
+}
+}
+if(res.indexOf("e")<0&&_27d>0){
+var tail=_27c.toString();
+if(tail.indexOf("e")>0){
+tail=".";
+}else{
+if(tail.indexOf(".")<0){
+tail=".";
+}else{
+tail=tail.substring(tail.indexOf("."));
+}
+}
+if(tail.length-1>_27d){
+tail=tail.substring(0,_27d+1);
+}
+while(tail.length-1<_27d){
+tail+="0";
+}
+res+=tail;
+}
+return res;
+};
+MochiKit.Format.roundToFixed=function(_280,_281){
+var _282=Math.abs(_280)+0.5*Math.pow(10,-_281);
+var res=MochiKit.Format.truncToFixed(_282,_281);
+if(_280<0){
+res="-"+res;
+}
+return res;
+};
+MochiKit.Format.percentFormat=function(_284){
+return MochiKit.Format.twoDigitFloat(100*_284)+"%";
+};
+// Public API of MochiKit.Format, plus the built-in number-format
+// locales (separator/decimal/percent per language code).
+MochiKit.Format.EXPORT=["truncToFixed","roundToFixed","numberFormatter","formatLocale","twoDigitAverage","twoDigitFloat","percentFormat","lstrip","rstrip","strip"];
+MochiKit.Format.LOCALE={en_US:{separator:",",decimal:".",percent:"%"},de_DE:{separator:".",decimal:",",percent:"%"},pt_BR:{separator:".",decimal:",",percent:"%"},fr_FR:{separator:" ",decimal:",",percent:"%"},"default":"en_US"};
+MochiKit.Format.EXPORT_OK=[];
+MochiKit.Format.EXPORT_TAGS={":all":MochiKit.Format.EXPORT,":common":MochiKit.Format.EXPORT};
+// Module setup: give each locale table a repr()/NAME for debugging and
+// tag every function in the module with a dotted NAME.
+MochiKit.Format.__new__=function(){
+var base=this.NAME+".";
+var k,v,o;
+for(k in this.LOCALE){
+o=this.LOCALE[k];
+if(typeof (o)=="object"){
+o.repr=function(){
+return this.NAME;
+};
+o.NAME=base+"LOCALE."+k;
+}
+}
+for(k in this){
+o=this[k];
+if(typeof (o)=="function"&&typeof (o.NAME)=="undefined"){
+// Some host functions reject expando properties; ignore failures.
+try{
+o.NAME=base+k;
+}
+catch(e){
+}
+}
+}
+};
+MochiKit.Format.__new__();
+// Export the module's symbols into the enclosing scope unless a module
+// loader (JSAN/dojo) is in charge and exporting was not disabled.
+if(typeof (MochiKit.Base)!="undefined"){
+MochiKit.Base._exportSymbols(this,MochiKit.Format);
+}else{
+(function(_289,_28a){
+if((typeof (JSAN)=="undefined"&&typeof (dojo)=="undefined")||(MochiKit.__export__===false)){
+var all=_28a.EXPORT_TAGS[":all"];
+for(var i=0;i<all.length;i++){
+_289[all[i]]=_28a[all[i]];
+}
+}
+})(this,MochiKit.Format);
+}
+// MochiKit.Async: Deferred-based asynchronous programming (XHR
+// helpers, timers, locks).  Depends only on MochiKit.Base.
+MochiKit.Base._deps("Async",["Base"]);
+MochiKit.Async.NAME="MochiKit.Async";
+MochiKit.Async.VERSION="1.4.2";
+MochiKit.Async.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Async.toString=function(){
+return this.__repr__();
+};
+// A Deferred encapsulates a result that is not yet available; callback
+// and errback chains run when the result arrives.  The optional
+// canceller is invoked by cancel() before the Deferred has fired.
+MochiKit.Async.Deferred=function(_28d){
+this.chain=[];
+this.id=this._nextId();
+// fired: -1 = unfired, 0 = success, 1 = error.
+this.fired=-1;
+this.paused=0;
+// results[0] holds the success value, results[1] the error.
+this.results=[null,null];
+this.canceller=_28d;
+this.silentlyCancelled=false;
+this.chained=false;
+};
+MochiKit.Async.Deferred.prototype={repr:function(){
+var _28e;
+if(this.fired==-1){
+_28e="unfired";
+}else{
+if(this.fired===0){
+_28e="success";
+}else{
+_28e="error";
+}
+}
+return "Deferred("+this.id+", "+_28e+")";
+// cancel(): abort an unfired Deferred via its canceller (or mark it
+// silently cancelled); on an already-successful chained Deferred,
+// forward the cancellation to the inner Deferred.
+},toString:MochiKit.Base.forwardCall("repr"),_nextId:MochiKit.Base.counter(),cancel:function(){
+var self=MochiKit.Async;
+if(this.fired==-1){
+if(this.canceller){
+this.canceller(this);
+}else{
+this.silentlyCancelled=true;
+}
+if(this.fired==-1){
+this.errback(new self.CancelledError(this));
+}
+}else{
+if((this.fired===0)&&(this.results[0] instanceof self.Deferred)){
+this.results[0].cancel();
+}
+}
+// _resback(): record the result (Error => error track) and run the
+// chain.
+},_resback:function(res){
+this.fired=((res instanceof Error)?1:0);
+this.results[this.fired]=res;
+this._fire();
+// _check(): guard against firing twice, except after a silent cancel.
+},_check:function(){
+if(this.fired!=-1){
+if(!this.silentlyCancelled){
+throw new MochiKit.Async.AlreadyCalledError(this);
+}
+this.silentlyCancelled=false;
+return;
+}
+// callback(): fire successfully with res (must not be a Deferred).
+},callback:function(res){
+this._check();
+if(res instanceof MochiKit.Async.Deferred){
+throw new Error("Deferred instances can only be chained if they are the result of a callback");
+}
+this._resback(res);
+// errback(): fire with an error; non-Error values are wrapped in a
+// GenericError.
+},errback:function(res){
+this._check();
+var self=MochiKit.Async;
+if(res instanceof self.Deferred){
+throw new Error("Deferred instances can only be chained if they are the result of a callback");
+}
+if(!(res instanceof Error)){
+res=new self.GenericError(res);
+}
+this._resback(res);
+// addBoth(): one handler for both the success and error tracks; extra
+// arguments are partially applied.
+},addBoth:function(fn){
+if(arguments.length>1){
+fn=MochiKit.Base.partial.apply(null,arguments);
+}
+return this.addCallbacks(fn,fn);
+},addCallback:function(fn){
+if(arguments.length>1){
+fn=MochiKit.Base.partial.apply(null,arguments);
+}
+return this.addCallbacks(fn,null);
+},addErrback:function(fn){
+if(arguments.length>1){
+fn=MochiKit.Base.partial.apply(null,arguments);
+}
+return this.addCallbacks(null,fn);
+// addCallbacks(): append a (callback, errback) pair; runs immediately
+// if this Deferred has already fired.
+},addCallbacks:function(cb,eb){
+if(this.chained){
+throw new Error("Chained Deferreds can not be re-used");
+}
+this.chain.push([cb,eb]);
+if(this.fired>=0){
+this._fire();
+}
+return this;
+// _fire(): run the chain, switching between the success (0) and error
+// (1) tracks as each handler returns a value or an Error.  When a
+// handler returns a Deferred, processing pauses and resumes when that
+// inner Deferred fires.
+},_fire:function(){
+var _299=this.chain;
+var _29a=this.fired;
+var res=this.results[_29a];
+var self=this;
+var cb=null;
+while(_299.length>0&&this.paused===0){
+var pair=_299.shift();
+var f=pair[_29a];
+if(f===null){
+continue;
+}
+try{
+res=f(res);
+_29a=((res instanceof Error)?1:0);
+if(res instanceof MochiKit.Async.Deferred){
+cb=function(res){
+self._resback(res);
+self.paused--;
+if((self.paused===0)&&(self.fired>=0)){
+self._fire();
+}
+};
+this.paused++;
+}
+}
+catch(err){
+_29a=1;
+if(!(err instanceof Error)){
+err=new MochiKit.Async.GenericError(err);
+}
+res=err;
+}
+}
+this.fired=_29a;
+this.results[_29a]=res;
+// Paused on an inner Deferred: hook our resumption onto it and mark it
+// chained so it cannot be re-used.
+if(cb&&this.paused){
+res.addBoth(cb);
+res.chained=true;
+}
+}};
+// Bulk-install the MochiKit.Async module functions.
+MochiKit.Base.update(MochiKit.Async,{evalJSONRequest:function(req){
+// Evaluate the JSON body of a completed XMLHttpRequest.
+return MochiKit.Base.evalJSON(req.responseText);
+},succeed:function(_2a2){
+var d=new MochiKit.Async.Deferred();
+d.callback.apply(d,arguments);
+return d;
+},fail:function(_2a4){
+var d=new MochiKit.Async.Deferred();
+d.errback.apply(d,arguments);
+return d;
+// Create an XMLHttpRequest, trying the standard constructor first and
+// then the IE ActiveX flavours; the first factory that works is
+// memoized on the function itself for later calls.
+},getXMLHttpRequest:function(){
+var self=arguments.callee;
+if(!self.XMLHttpRequest){
+var _2a7=[function(){
+return new XMLHttpRequest();
+},function(){
+return new ActiveXObject("Msxml2.XMLHTTP");
+},function(){
+return new ActiveXObject("Microsoft.XMLHTTP");
+},function(){
+return new ActiveXObject("Msxml2.XMLHTTP.4.0");
+},function(){
+// Last resort: nothing worked.
+throw new MochiKit.Async.BrowserComplianceError("Browser does not support XMLHttpRequest");
+}];
+for(var i=0;i<_2a7.length;i++){
+var func=_2a7[i];
+try{
+self.XMLHttpRequest=func;
+return func();
+}
+catch(e){
+}
+}
+}
+return self.XMLHttpRequest();
+},_xhr_onreadystatechange:function(d){
+var m=MochiKit.Base;
+if(this.readyState==4){
+try{
+this.onreadystatechange=null;
+}
+catch(e){
+try{
+this.onreadystatechange=m.noop;
+}
+catch(e){
+}
+}
+var _2ac=null;
+try{
+_2ac=this.status;
+if(!_2ac&&m.isNotEmpty(this.responseText)){
+_2ac=304;
+}
+}
+catch(e){
+}
+if(_2ac==200||_2ac==201||_2ac==204||_2ac==304||_2ac==1223){
+d.callback(this);
+}else{
+var err=new MochiKit.Async.XMLHttpRequestError(this,"Request failed");
+if(err.number){
+d.errback(err);
+}else{
+d.errback(err);
+}
+}
+}
+// Cancel an in-flight request: detach the handler first so abort()
+// does not trigger an errback through onreadystatechange.
+},_xhr_canceller:function(req){
+try{
+req.onreadystatechange=null;
+}
+catch(e){
+try{
+req.onreadystatechange=MochiKit.Base.noop;
+}
+catch(e){
+}
+}
+req.abort();
+// Send an already-open XMLHttpRequest and return a cancellable
+// Deferred that fires with the request object on completion.
+},sendXMLHttpRequest:function(req,_2b0){
+if(typeof (_2b0)=="undefined"||_2b0===null){
+_2b0="";
+}
+var m=MochiKit.Base;
+var self=MochiKit.Async;
+var d=new self.Deferred(m.partial(self._xhr_canceller,req));
+try{
+req.onreadystatechange=m.bind(self._xhr_onreadystatechange,req,d);
+req.send(_2b0);
+}
+catch(e){
+// send() threw synchronously: detach the handler and fail the
+// Deferred with the thrown error.
+try{
+req.onreadystatechange=null;
+}
+catch(ignore){
+}
+d.errback(e);
+}
+return d;
+},doXHR:function(url,opts){
+var self=MochiKit.Async;
+return self.callLater(0,self._doXHR,url,opts);
+// Build and send the request described by opts (method, queryString,
+// username/password, mimeType, headers, sendContent).
+},_doXHR:function(url,opts){
+var m=MochiKit.Base;
+opts=m.update({method:"GET",sendContent:""},opts);
+var self=MochiKit.Async;
+var req=self.getXMLHttpRequest();
+if(opts.queryString){
+var qs=m.queryString(opts.queryString);
+if(qs){
+url+="?"+qs;
+}
+}
+if("username" in opts){
+req.open(opts.method,url,true,opts.username,opts.password);
+}else{
+req.open(opts.method,url,true);
+}
+if(req.overrideMimeType&&opts.mimeType){
+req.overrideMimeType(opts.mimeType);
+}
+req.setRequestHeader("X-Requested-With","XMLHttpRequest");
+if(opts.headers){
+var _2bd=opts.headers;
+// Headers may be given as a mapping or a list of [name, value].
+if(!m.isArrayLike(_2bd)){
+_2bd=m.items(_2bd);
+}
+for(var i=0;i<_2bd.length;i++){
+var _2bf=_2bd[i];
+var name=_2bf[0];
+var _2c1=_2bf[1];
+req.setRequestHeader(name,_2c1);
+}
+}
+return self.sendXMLHttpRequest(req,opts.sendContent);
+},_buildURL:function(url){
+if(arguments.length>1){
+var m=MochiKit.Base;
+var qs=m.queryString.apply(null,m.extend(null,arguments,1));
+if(qs){
+return url+"?"+qs;
+}
+}
+return url;
+},doSimpleXMLHttpRequest:function(url){
+var self=MochiKit.Async;
+url=self._buildURL.apply(self,arguments);
+return self.doXHR(url);
+// GET a JSON document; the Deferred fires with the parsed result.
+},loadJSONDoc:function(url){
+var self=MochiKit.Async;
+url=self._buildURL.apply(self,arguments);
+var d=self.doXHR(url,{"mimeType":"text/plain","headers":[["Accept","application/json"]]});
+d=d.addCallback(self.evalJSONRequest);
+return d;
+// Return a Deferred that fires after `_2ca` seconds (fractions
+// allowed) with the optional value `_2cb`; cancelling it clears the
+// underlying timer.
+},wait:function(_2ca,_2cb){
+var d=new MochiKit.Async.Deferred();
+var m=MochiKit.Base;
+if(typeof (_2cb)!="undefined"){
+d.addCallback(function(){
+return _2cb;
+});
+}
+var _2ce=setTimeout(m.bind("callback",d),Math.floor(_2ca*1000));
+d.canceller=function(){
+try{
+clearTimeout(_2ce);
+}
+catch(e){
+}
+};
+return d;
+// Call func(args...) after `_2cf` seconds, returning a Deferred for
+// its result (the extra arguments are partially applied).
+},callLater:function(_2cf,func){
+var m=MochiKit.Base;
+var _2d2=m.partial.apply(m,m.extend(null,arguments,1));
+return MochiKit.Async.wait(_2cf).addCallback(function(res){
+return _2d2();
+});
+}});
+// DeferredLock: an asynchronous mutex.  acquire() returns a Deferred
+// that fires when the lock has been obtained; waiters queue in FIFO
+// order.
+MochiKit.Async.DeferredLock=function(){
+this.waiting=[];
+this.locked=false;
+this.id=this._nextId();
+};
+MochiKit.Async.DeferredLock.prototype={__class__:MochiKit.Async.DeferredLock,acquire:function(){
+var d=new MochiKit.Async.Deferred();
+if(this.locked){
+this.waiting.push(d);
+}else{
+this.locked=true;
+d.callback(this);
+}
+return d;
+// release(): hand the lock to the next queued waiter, if any.
+},release:function(){
+if(!this.locked){
+throw TypeError("Tried to release an unlocked DeferredLock");
+}
+this.locked=false;
+if(this.waiting.length>0){
+this.locked=true;
+this.waiting.shift().callback(this);
+}
+},_nextId:MochiKit.Base.counter(),repr:function(){
+var _2d5;
+if(this.locked){
+_2d5="locked, "+this.waiting.length+" waiting";
+}else{
+_2d5="unlocked";
+}
+return "DeferredLock("+this.id+", "+_2d5+")";
+},toString:MochiKit.Base.forwardCall("repr")};
+// DeferredList: aggregate several Deferreds into one.  fireOnOne
+// Callback/Errback make it fire on the first success/failure;
+// consumeErrors nulls out errors after recording them.
+MochiKit.Async.DeferredList=function(list,_2d7,_2d8,_2d9,_2da){
+MochiKit.Async.Deferred.apply(this,[_2da]);
+this.list=list;
+var _2db=[];
+this.resultList=_2db;
+this.finishedCount=0;
+this.fireOnOneCallback=_2d7;
+this.fireOnOneErrback=_2d8;
+this.consumeErrors=_2d9;
+var cb=MochiKit.Base.bind(this._cbDeferred,this);
+for(var i=0;i<list.length;i++){
+var d=list[i];
+_2db.push(undefined);
+d.addCallback(cb,i,true);
+d.addErrback(cb,i,false);
+}
+// An empty list can never fire via its members; fire immediately.
+if(list.length===0&&!_2d7){
+this.callback(this.resultList);
+}
+};
+MochiKit.Async.DeferredList.prototype=new MochiKit.Async.Deferred();
+// Per-member bookkeeping: record [succeeded, result] at the member's
+// index and decide whether the aggregate Deferred should fire.
+MochiKit.Async.DeferredList.prototype._cbDeferred=function(_2df,_2e0,_2e1){
+this.resultList[_2df]=[_2e0,_2e1];
+this.finishedCount+=1;
+if(this.fired==-1){
+if(_2e0&&this.fireOnOneCallback){
+this.callback([_2df,_2e1]);
+}else{
+if(!_2e0&&this.fireOnOneErrback){
+this.errback(_2e1);
+}else{
+if(this.finishedCount==this.list.length){
+this.callback(this.resultList);
+}
+}
+}
+}
+// Swallow the error for the member's own chain when requested.
+if(!_2e0&&this.consumeErrors){
+_2e1=null;
+}
+return _2e1;
+};
+MochiKit.Async.gatherResults=function(_2e2){
+var d=new MochiKit.Async.DeferredList(_2e2,false,true,false);
+d.addCallback(function(_2e4){
+var ret=[];
+for(var i=0;i<_2e4.length;i++){
+ret.push(_2e4[i][1]);
+}
+return ret;
+});
+return d;
+};
+// Call func(args...) and normalise the outcome to a Deferred: a
+// returned Deferred passes through, a returned Error or a throw
+// becomes a failure, any other value becomes an immediate success.
+MochiKit.Async.maybeDeferred=function(func){
+var self=MochiKit.Async;
+var _2e9;
+try{
+var r=func.apply(null,MochiKit.Base.extend([],arguments,1));
+if(r instanceof self.Deferred){
+_2e9=r;
+}else{
+if(r instanceof Error){
+_2e9=self.fail(r);
+}else{
+_2e9=self.succeed(r);
+}
+}
+}
+catch(e){
+_2e9=self.fail(e);
+}
+return _2e9;
+};
+// Public (EXPORT) and opt-in (EXPORT_OK) symbols for MochiKit.Async.
+MochiKit.Async.EXPORT=["AlreadyCalledError","CancelledError","BrowserComplianceError","GenericError","XMLHttpRequestError","Deferred","succeed","fail","getXMLHttpRequest","doSimpleXMLHttpRequest","loadJSONDoc","wait","callLater","sendXMLHttpRequest","DeferredLock","DeferredList","gatherResults","maybeDeferred","doXHR"];
+MochiKit.Async.EXPORT_OK=["evalJSONRequest"];
+// Module setup: define the Async error classes and the export tags.
+MochiKit.Async.__new__=function(){
+var m=MochiKit.Base;
+var ne=m.partial(m._newNamedError,this);
+ne("AlreadyCalledError",function(_2ed){
+this.deferred=_2ed;
+});
+ne("CancelledError",function(_2ee){
+this.deferred=_2ee;
+});
+ne("BrowserComplianceError",function(msg){
+this.message=msg;
+});
+ne("GenericError",function(msg){
+this.message=msg;
+});
+ne("XMLHttpRequestError",function(req,msg){
+this.req=req;
+this.message=msg;
+// Mirror the HTTP status as err.number; reading req.status can throw
+// on some hosts, in which case number stays undefined.
+try{
+this.number=req.status;
+}
+catch(e){
+}
+});
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+};
+MochiKit.Async.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.Async);
+// MochiKit.DOM: DOM creation/manipulation helpers.  DEPRECATED lists
+// [oldName, newLocation, sinceVersion] triples shimmed in __new__.
+MochiKit.Base._deps("DOM",["Base"]);
+MochiKit.DOM.NAME="MochiKit.DOM";
+MochiKit.DOM.VERSION="1.4.2";
+MochiKit.DOM.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.DOM.toString=function(){
+return this.__repr__();
+};
+MochiKit.DOM.EXPORT=["removeEmptyTextNodes","formContents","currentWindow","currentDocument","withWindow","withDocument","registerDOMConverter","coerceToDOM","createDOM","createDOMFunc","isChildNode","getNodeAttribute","removeNodeAttribute","setNodeAttribute","updateNodeAttributes","appendChildNodes","insertSiblingNodesAfter","insertSiblingNodesBefore","replaceChildNodes","removeElement","swapDOM","BUTTON","TT","PRE","H1","H2","H3","H4","H5","H6","BR","CANVAS","HR","LABEL","TEXTAREA","FORM","STRONG","SELECT","OPTION","OPTGROUP","LEGEND","FIELDSET","P","UL","OL","LI","DL","DT","DD","TD","TR","THEAD","TBODY","TFOOT","TABLE","TH","INPUT","SPAN","A","DIV","IMG","getElement","$","getElementsByTagAndClassName","addToCallStack","addLoadEvent","focusOnLoad","setElementClass","toggleElementClass","addElementClass","removeElementClass","swapElementClass","hasElementClass","computedStyle","escapeHTML","toHTML","emitHTML","scrapeText","getFirstParentByTagAndClassName","getFirstElementByTagAndClassName"];
+MochiKit.DOM.EXPORT_OK=["domConverters"];
+MochiKit.DOM.DEPRECATED=[["computedStyle","MochiKit.Style.getStyle","1.4"],["elementDimensions","MochiKit.Style.getElementDimensions","1.4"],["elementPosition","MochiKit.Style.getElementPosition","1.4"],["getViewportDimensions","MochiKit.Style.getViewportDimensions","1.4"],["hideElement","MochiKit.Style.hideElement","1.4"],["makeClipping","MochiKit.Style.makeClipping","1.4.1"],["makePositioned","MochiKit.Style.makePositioned","1.4.1"],["setElementDimensions","MochiKit.Style.setElementDimensions","1.4"],["setElementPosition","MochiKit.Style.setElementPosition","1.4"],["setDisplayForElement","MochiKit.Style.setDisplayForElement","1.4"],["setOpacity","MochiKit.Style.setOpacity","1.4"],["showElement","MochiKit.Style.showElement","1.4"],["undoClipping","MochiKit.Style.undoClipping","1.4.1"],["undoPositioned","MochiKit.Style.undoPositioned","1.4.1"],["Coordinates","MochiKit.Style.Coordinates","1.4"],["Dimensions","MochiKit.Style.Dimensions","1.4"]];
+// Bulk-install the MochiKit.DOM module functions.
+MochiKit.Base.update(MochiKit.DOM,{currentWindow:function(){
+return MochiKit.DOM._window;
+},currentDocument:function(){
+return MochiKit.DOM._document;
+// withWindow(): run func with the module's window/document temporarily
+// rebound to win (restored even if func throws).
+},withWindow:function(win,func){
+var self=MochiKit.DOM;
+var _2f6=self._document;
+var _2f7=self._window;
+var rval;
+try{
+self._window=win;
+self._document=win.document;
+rval=func();
+}
+catch(e){
+self._window=_2f7;
+self._document=_2f6;
+throw e;
+}
+self._window=_2f7;
+self._document=_2f6;
+return rval;
+// formContents(): walk a form (or the document body) and collect
+// parallel [names, values] arrays of its submittable controls,
+// mimicking what a form submission would send.
+},formContents:function(elem){
+var _2fa=[];
+var _2fb=[];
+var m=MochiKit.Base;
+var self=MochiKit.DOM;
+if(typeof (elem)=="undefined"||elem===null){
+elem=self._document.body;
+}else{
+elem=self.getElement(elem);
+}
+m.nodeWalk(elem,function(elem){
+var name=elem.name;
+if(m.isNotEmpty(name)){
+var _300=elem.tagName.toUpperCase();
+// Unchecked radio/checkbox controls are not submitted.
+if(_300==="INPUT"&&(elem.type=="radio"||elem.type=="checkbox")&&!elem.checked){
+return null;
+}
+if(_300==="SELECT"){
+if(elem.type=="select-one"){
+if(elem.selectedIndex>=0){
+var opt=elem.options[elem.selectedIndex];
+var v=opt.value;
+if(!v){
+// Fall back to the option text when the markup has no explicit
+// value= attribute (detected via outerHTML).
+var h=opt.outerHTML;
+if(h&&!h.match(/^[^>]+\svalue\s*=/i)){
+v=opt.text;
+}
+}
+_2fa.push(name);
+_2fb.push(v);
+return null;
+}
+_2fa.push(name);
+_2fb.push("");
+return null;
+}else{
+// Multi-select: one entry per selected option.
+var opts=elem.options;
+if(!opts.length){
+_2fa.push(name);
+_2fb.push("");
+return null;
+}
+for(var i=0;i<opts.length;i++){
+var opt=opts[i];
+if(!opt.selected){
+continue;
+}
+var v=opt.value;
+if(!v){
+var h=opt.outerHTML;
+if(h&&!h.match(/^[^>]+\svalue\s*=/i)){
+v=opt.text;
+}
+}
+_2fa.push(name);
+_2fb.push(v);
+}
+return null;
+}
+}
+// Named containers: recurse into children rather than record them.
+if(_300==="FORM"||_300==="P"||_300==="SPAN"||_300==="DIV"){
+return elem.childNodes;
+}
+_2fa.push(name);
+_2fb.push(elem.value||"");
+return null;
+}
+return elem.childNodes;
+});
+return [_2fa,_2fb];
+// withDocument(): run func with the module's document temporarily
+// rebound to doc (restored even if func throws).
+},withDocument:function(doc,func){
+var self=MochiKit.DOM;
+var _309=self._document;
+var rval;
+try{
+self._document=doc;
+rval=func();
+}
+catch(e){
+self._document=_309;
+throw e;
+}
+self._document=_309;
+return rval;
+// registerDOMConverter(): register an adapter consulted by coerceToDOM
+// for otherwise-unknown objects.
+},registerDOMConverter:function(name,_30c,wrap,_30e){
+MochiKit.DOM.domConverters.register(name,_30c,wrap,_30e);
+// coerceToDOM(): convert almost anything -- strings, numbers,
+// booleans, arrays/iterables, functions, objects with dom()/__dom__()
+// -- into DOM nodes (lists map recursively).
+},coerceToDOM:function(node,ctx){
+var m=MochiKit.Base;
+var im=MochiKit.Iter;
+var self=MochiKit.DOM;
+if(im){
+var iter=im.iter;
+var _315=im.repeat;
+}
+var map=m.map;
+var _317=self.domConverters;
+var _318=arguments.callee;
+var _319=m.NotFound;
+while(true){
+if(typeof (node)=="undefined"||node===null){
+return null;
+}
+// Host collections (e.g. NodeList) can be typeof "function" yet
+// array-like; snapshot them into a real list.
+if(typeof (node)=="function"&&typeof (node.length)=="number"&&!(node instanceof Function)){
+node=im?im.list(node):m.extend(null,node);
+}
+if(typeof (node.nodeType)!="undefined"&&node.nodeType>0){
+return node;
+}
+if(typeof (node)=="number"||typeof (node)=="boolean"){
+node=node.toString();
+}
+if(typeof (node)=="string"){
+return self._document.createTextNode(node);
+}
+if(typeof (node.__dom__)=="function"){
+node=node.__dom__(ctx);
+continue;
+}
+if(typeof (node.dom)=="function"){
+node=node.dom(ctx);
+continue;
+}
+if(typeof (node)=="function"){
+node=node.apply(ctx,[ctx]);
+continue;
+}
+if(im){
+var _31a=null;
+try{
+_31a=iter(node);
+}
+catch(e){
+}
+if(_31a){
+return map(_318,_31a,_315(ctx));
+}
+}else{
+if(m.isArrayLike(node)){
+var func=function(n){
+return _318(n,ctx);
+};
+return map(func,node);
+}
+}
+// Last resorts: a registered adapter, then a stringified text node.
+try{
+node=_317.match(node,ctx);
+continue;
+}
+catch(e){
+if(e!=_319){
+throw e;
+}
+}
+return self._document.createTextNode(node.toString());
+}
+return undefined;
+// isChildNode(): true if node is _31e itself or one of its
+// descendants.
+},isChildNode:function(node,_31e){
+var self=MochiKit.DOM;
+if(typeof (node)=="string"){
+node=self.getElement(node);
+}
+if(typeof (_31e)=="string"){
+_31e=self.getElement(_31e);
+}
+if(typeof (node)=="undefined"||node===null){
+return false;
+}
+while(node!=null&&node!==self._document){
+if(node===_31e){
+return true;
+}
+node=node.parentNode;
+}
+return false;
+// setNodeAttribute(): set one attribute via updateNodeAttributes;
+// returns null on error.
+},setNodeAttribute:function(node,attr,_322){
+var o={};
+o[attr]=_322;
+try{
+return MochiKit.DOM.updateNodeAttributes(node,o);
+}
+catch(e){
+}
+return null;
+// getNodeAttribute(): read an attribute, honouring the renames table
+// and skipping values that match the ignore (default-value) table.
+},getNodeAttribute:function(node,attr){
+var self=MochiKit.DOM;
+var _327=self.attributeArray.renames[attr];
+var _328=self.attributeArray.ignoreAttr[attr];
+node=self.getElement(node);
+try{
+if(_327){
+return node[_327];
+}
+var _329=node.getAttribute(attr);
+if(_329!=_328){
+return _329;
+}
+}
+catch(e){
+}
+return null;
+// removeNodeAttribute(): remove an attribute (renamed properties are
+// returned instead of removed); null on error.
+},removeNodeAttribute:function(node,attr){
+var self=MochiKit.DOM;
+var _32d=self.attributeArray.renames[attr];
+node=self.getElement(node);
+try{
+if(_32d){
+return node[_32d];
+}
+return node.removeAttribute(attr);
+}
+catch(e){
+}
+return null;
+// updateNodeAttributes(): apply a mapping of attributes to a node.
+// Special cases: style objects go through MochiKit.Style, "on*"
+// string handlers are compiled with new Function, and the
+// non-compliant branch routes renamed properties around old IE's
+// unreliable setAttribute.
+},updateNodeAttributes:function(node,_32f){
+var elem=node;
+var self=MochiKit.DOM;
+if(typeof (node)=="string"){
+elem=self.getElement(node);
+}
+if(_32f){
+var _332=MochiKit.Base.updatetree;
+if(self.attributeArray.compliant){
+for(var k in _32f){
+var v=_32f[k];
+if(typeof (v)=="object"&&typeof (elem[k])=="object"){
+if(k=="style"&&MochiKit.Style){
+MochiKit.Style.setStyle(elem,v);
+}else{
+_332(elem[k],v);
+}
+}else{
+if(k.substring(0,2)=="on"){
+if(typeof (v)=="string"){
+v=new Function(v);
+}
+elem[k]=v;
+}else{
+elem.setAttribute(k,v);
+}
+}
+// Force the property in sync when setAttribute did not stick.
+if(typeof (elem[k])=="string"&&elem[k]!=v){
+elem[k]=v;
+}
+}
+}else{
+var _335=self.attributeArray.renames;
+for(var k in _32f){
+v=_32f[k];
+var _336=_335[k];
+if(k=="style"&&typeof (v)=="string"){
+elem.style.cssText=v;
+}else{
+if(typeof (_336)=="string"){
+elem[_336]=v;
+}else{
+if(typeof (elem[k])=="object"&&typeof (v)=="object"){
+if(k=="style"&&MochiKit.Style){
+MochiKit.Style.setStyle(elem,v);
+}else{
+_332(elem[k],v);
+}
+}else{
+if(k.substring(0,2)=="on"){
+if(typeof (v)=="string"){
+v=new Function(v);
+}
+elem[k]=v;
+}else{
+elem.setAttribute(k,v);
+}
+}
+}
+}
+if(typeof (elem[k])=="string"&&elem[k]!=v){
+elem[k]=v;
+}
+}
+}
+}
+return elem;
+// appendChildNodes(): coerce the remaining arguments to DOM nodes and
+// append them to node, flattening nested lists via a work queue.
+},appendChildNodes:function(node){
+var elem=node;
+var self=MochiKit.DOM;
+if(typeof (node)=="string"){
+elem=self.getElement(node);
+}
+var _33a=[self.coerceToDOM(MochiKit.Base.extend(null,arguments,1),elem)];
+var _33b=MochiKit.Base.concat;
+while(_33a.length){
+var n=_33a.shift();
+if(typeof (n)=="undefined"||n===null){
+}else{
+if(typeof (n.nodeType)=="number"){
+elem.appendChild(n);
+}else{
+_33a=_33b(n,_33a);
+}
+}
+}
+return elem;
+// insertSiblingNodesBefore(): same flattening insert, but before node;
+// returns the parent node.
+},insertSiblingNodesBefore:function(node){
+var elem=node;
+var self=MochiKit.DOM;
+if(typeof (node)=="string"){
+elem=self.getElement(node);
+}
+var _340=[self.coerceToDOM(MochiKit.Base.extend(null,arguments,1),elem)];
+var _341=elem.parentNode;
+var _342=MochiKit.Base.concat;
+while(_340.length){
+var n=_340.shift();
+if(typeof (n)=="undefined"||n===null){
+}else{
+if(typeof (n.nodeType)=="number"){
+_341.insertBefore(n,elem);
+}else{
+_340=_342(n,_340);
+}
+}
+}
+return _341;
+// insertSiblingNodesAfter(): insert after node, delegating to
+// insertSiblingNodesBefore or appendChildNodes as appropriate.
+},insertSiblingNodesAfter:function(node){
+var elem=node;
+var self=MochiKit.DOM;
+if(typeof (node)=="string"){
+elem=self.getElement(node);
+}
+var _347=[self.coerceToDOM(MochiKit.Base.extend(null,arguments,1),elem)];
+if(elem.nextSibling){
+return self.insertSiblingNodesBefore(elem.nextSibling,_347);
+}else{
+return self.appendChildNodes(elem.parentNode,_347);
+}
+// replaceChildNodes(): empty node, then append the given children.
+},replaceChildNodes:function(node){
+var elem=node;
+var self=MochiKit.DOM;
+if(typeof (node)=="string"){
+elem=self.getElement(node);
+arguments[0]=elem;
+}
+var _34b;
+while((_34b=elem.firstChild)){
+elem.removeChild(_34b);
+}
+if(arguments.length<2){
+return elem;
+}else{
+return self.appendChildNodes.apply(this,arguments);
+}
+// createDOM(): create an element (XHTML-namespace-aware where the
+// document supports it), apply an attribute map, and append any
+// further arguments as children.
+},createDOM:function(name,_34d){
+var elem;
+var self=MochiKit.DOM;
+var m=MochiKit.Base;
+// A string/number second argument is content, not an attribute map;
+// re-invoke with a null attribute slot.
+if(typeof (_34d)=="string"||typeof (_34d)=="number"){
+var args=m.extend([name,null],arguments,1);
+return arguments.callee.apply(this,args);
+}
+if(typeof (name)=="string"){
+var _352=self._xhtml;
+if(_34d&&!self.attributeArray.compliant){
+// Old IE cannot set name/type after creation; bake them into the
+// createElement tag string instead.
+var _353="";
+if("name" in _34d){
+_353+=" name=\""+self.escapeHTML(_34d.name)+"\"";
+}
+if(name=="input"&&"type" in _34d){
+_353+=" type=\""+self.escapeHTML(_34d.type)+"\"";
+}
+if(_353){
+name="<"+name+_353+">";
+_352=false;
+}
+}
+var d=self._document;
+if(_352&&d===document){
+elem=d.createElementNS("http://www.w3.org/1999/xhtml",name);
+}else{
+elem=d.createElement(name);
+}
+}else{
+elem=name;
+}
+if(_34d){
+self.updateNodeAttributes(elem,_34d);
+}
+if(arguments.length<=2){
+return elem;
+}else{
+var args=m.extend([elem],arguments,2);
+return self.appendChildNodes.apply(this,args);
+}
+// createDOMFunc(): partially apply createDOM (used to build the UL,
+// DIV, SPAN, ... shorthands in __new__).
+},createDOMFunc:function(){
+var m=MochiKit.Base;
+return m.partial.apply(this,m.extend([MochiKit.DOM.createDOM],arguments));
+// removeElement(): detach an element from its parent and return it.
+},removeElement:function(elem){
+var self=MochiKit.DOM;
+var e=self.coerceToDOM(self.getElement(elem));
+e.parentNode.removeChild(e);
+return e;
+// swapDOM(): replace dest with src (or just remove dest when src is
+// falsy); returns src.
+},swapDOM:function(dest,src){
+var self=MochiKit.DOM;
+dest=self.getElement(dest);
+var _35c=dest.parentNode;
+if(src){
+src=self.coerceToDOM(self.getElement(src),_35c);
+_35c.replaceChild(src,dest);
+}else{
+_35c.removeChild(dest);
+}
+return src;
+// getElement() / $(): resolve an id string to an element; multiple
+// arguments yield a list of elements.
+},getElement:function(id){
+var self=MochiKit.DOM;
+if(arguments.length==1){
+return ((typeof (id)=="string")?self._document.getElementById(id):id);
+}else{
+return MochiKit.Base.map(self.getElement,arguments);
+}
+// getElementsByTagAndClassName(): all descendants of parent matching
+// the tag name (or "*") and, optionally, one CSS class.
+},getElementsByTagAndClassName:function(_35f,_360,_361){
+var self=MochiKit.DOM;
+if(typeof (_35f)=="undefined"||_35f===null){
+_35f="*";
+}
+if(typeof (_361)=="undefined"||_361===null){
+_361=self._document;
+}
+_361=self.getElement(_361);
+if(_361==null){
+return [];
+}
+var _363=(_361.getElementsByTagName(_35f)||self._document.all);
+if(typeof (_360)=="undefined"||_360===null){
+return MochiKit.Base.extend(null,_363);
+}
+var _364=[];
+for(var i=0;i<_363.length;i++){
+var _366=_363[i];
+var cls=_366.className;
+if(typeof (cls)!="string"){
+cls=_366.getAttribute("class");
+}
+if(typeof (cls)=="string"){
+var _368=cls.split(" ");
+for(var j=0;j<_368.length;j++){
+if(_368[j]==_360){
+_364.push(_366);
+break;
+}
+}
+}
+}
+return _364;
+// _newCallStack(): build a handler that runs its callStack in order,
+// stopping when one entry returns false; with once, the handler
+// removes itself from this[path] after the first run.
+},_newCallStack:function(path,once){
+var rval=function(){
+var _36d=arguments.callee.callStack;
+for(var i=0;i<_36d.length;i++){
+if(_36d[i].apply(this,arguments)===false){
+break;
+}
+}
+if(once){
+try{
+this[path]=null;
+}
+catch(e){
+}
+}
+};
+rval.callStack=[];
+return rval;
+// addToCallStack(): chain func onto target[path], wrapping any
+// pre-existing plain handler into a new call stack.
+},addToCallStack:function(_36f,path,func,once){
+var self=MochiKit.DOM;
+var _374=_36f[path];
+var _375=_374;
+if(!(typeof (_374)=="function"&&typeof (_374.callStack)=="object"&&_374.callStack!==null)){
+_375=self._newCallStack(path,once);
+if(typeof (_374)=="function"){
+_375.callStack.push(_374);
+}
+_36f[path]=_375;
+}
+_375.callStack.push(func);
+// addLoadEvent(): run func once on window.onload.
+},addLoadEvent:function(func){
+var self=MochiKit.DOM;
+self.addToCallStack(self._window,"onload",func,true);
+// focusOnLoad(): focus the given element once the page has loaded.
+},focusOnLoad:function(_378){
+var self=MochiKit.DOM;
+self.addLoadEvent(function(){
+_378=self.getElement(_378);
+if(_378){
+_378.focus();
+}
+});
+// setElementClass(): overwrite the element's class attribute (using
+// "className" on the non-compliant/old-IE path).
+},setElementClass:function(_37a,_37b){
+var self=MochiKit.DOM;
+var obj=self.getElement(_37a);
+if(self.attributeArray.compliant){
+obj.setAttribute("class",_37b);
+}else{
+obj.setAttribute("className",_37b);
+}
+// toggleElementClass(): add the class to each listed element, or
+// remove it where already present.
+},toggleElementClass:function(_37e){
+var self=MochiKit.DOM;
+for(var i=1;i<arguments.length;i++){
+var obj=self.getElement(arguments[i]);
+if(!self.addElementClass(obj,_37e)){
+self.removeElementClass(obj,_37e);
+}
+}
+// addElementClass(): append the class if absent; returns true when
+// the class list changed.
+},addElementClass:function(_382,_383){
+var self=MochiKit.DOM;
+var obj=self.getElement(_382);
+var cls=obj.className;
+if(typeof (cls)!="string"){
+cls=obj.getAttribute("class");
+}
+if(typeof (cls)!="string"||cls.length===0){
+self.setElementClass(obj,_383);
+return true;
+}
+if(cls==_383){
+return false;
+}
+var _387=cls.split(" ");
+for(var i=0;i<_387.length;i++){
+if(_387[i]==_383){
+return false;
+}
+}
+self.setElementClass(obj,cls+" "+_383);
+return true;
+// removeElementClass(): remove the class if present; returns whether
+// anything was removed.
+},removeElementClass:function(_389,_38a){
+var self=MochiKit.DOM;
+var obj=self.getElement(_389);
+var cls=obj.className;
+if(typeof (cls)!="string"){
+cls=obj.getAttribute("class");
+}
+if(typeof (cls)!="string"||cls.length===0){
+return false;
+}
+if(cls==_38a){
+self.setElementClass(obj,"");
+return true;
+}
+var _38e=cls.split(" ");
+for(var i=0;i<_38e.length;i++){
+if(_38e[i]==_38a){
+_38e.splice(i,1);
+self.setElementClass(obj,_38e.join(" "));
+return true;
+}
+}
+return false;
+// swapElementClass(): replace one class with another, only when the
+// first was actually present.
+},swapElementClass:function(_390,_391,_392){
+var obj=MochiKit.DOM.getElement(_390);
+var res=MochiKit.DOM.removeElementClass(obj,_391);
+if(res){
+MochiKit.DOM.addElementClass(obj,_392);
+}
+return res;
+// hasElementClass(): true only if the element carries every listed
+// class.
+},hasElementClass:function(_395,_396){
+var obj=MochiKit.DOM.getElement(_395);
+if(obj==null){
+return false;
+}
+var cls=obj.className;
+if(typeof (cls)!="string"){
+cls=obj.getAttribute("class");
+}
+if(typeof (cls)!="string"){
+return false;
+}
+var _399=cls.split(" ");
+for(var i=1;i<arguments.length;i++){
+var good=false;
+for(var j=0;j<_399.length;j++){
+if(_399[j]==arguments[i]){
+good=true;
+break;
+}
+}
+if(!good){
+return false;
+}
+}
+return true;
+// escapeHTML(): escape &, ", < and > for HTML interpolation.
+// NOTE(review): single quotes are not escaped, so the result is not
+// safe inside single-quoted attribute values.
+},escapeHTML:function(s){
+return s.replace(/&/g,"&amp;").replace(/"/g,"&quot;").replace(/</g,"&lt;").replace(/>/g,"&gt;");
+// toHTML(): serialise a DOM subtree to a single HTML string.
+},toHTML:function(dom){
+return MochiKit.DOM.emitHTML(dom).join("");
+// emitHTML(): iterative (explicit-stack) DOM-to-HTML serialiser that
+// appends string fragments to lst; attributes are emitted sorted.
+},emitHTML:function(dom,lst){
+if(typeof (lst)=="undefined"||lst===null){
+lst=[];
+}
+var _3a1=[dom];
+var self=MochiKit.DOM;
+var _3a3=self.escapeHTML;
+var _3a4=self.attributeArray;
+while(_3a1.length){
+dom=_3a1.pop();
+if(typeof (dom)=="string"){
+lst.push(dom);
+}else{
+if(dom.nodeType==1){
+lst.push("<"+dom.tagName.toLowerCase());
+var _3a5=[];
+var _3a6=_3a4(dom);
+for(var i=0;i<_3a6.length;i++){
+var a=_3a6[i];
+_3a5.push([" ",a.name,"=\"",_3a3(a.value),"\""]);
+}
+_3a5.sort();
+for(i=0;i<_3a5.length;i++){
+var _3a9=_3a5[i];
+for(var j=0;j<_3a9.length;j++){
+lst.push(_3a9[j]);
+}
+}
+if(dom.hasChildNodes()){
+lst.push(">");
+// Push the close tag first, then children in reverse, so the
+// stack pops them back in document order.
+_3a1.push("</"+dom.tagName.toLowerCase()+">");
+var _3ab=dom.childNodes;
+for(i=_3ab.length-1;i>=0;i--){
+_3a1.push(_3ab[i]);
+}
+}else{
+lst.push("/>");
+}
+}else{
+if(dom.nodeType==3){
+lst.push(_3a3(dom.nodeValue));
+}
+}
+}
+}
+return lst;
+// scrapeText(): collect all text content beneath node, joined into one
+// string unless the second argument is truthy (then a list).
+},scrapeText:function(node,_3ad){
+var rval=[];
+(function(node){
+var cn=node.childNodes;
+if(cn){
+for(var i=0;i<cn.length;i++){
+arguments.callee.call(this,cn[i]);
+}
+}
+var _3b2=node.nodeValue;
+if(typeof (_3b2)=="string"){
+rval.push(_3b2);
+}
+})(MochiKit.DOM.getElement(node));
+if(_3ad){
+return rval;
+}else{
+return rval.join("");
+}
+// removeEmptyTextNodes(): delete whitespace-only text children.
+// NOTE(review): the index is not adjusted after a removal from the
+// live childNodes list, so an immediately following empty text node is
+// skipped -- longstanding upstream behaviour, confirm before changing.
+},removeEmptyTextNodes:function(_3b3){
+_3b3=MochiKit.DOM.getElement(_3b3);
+for(var i=0;i<_3b3.childNodes.length;i++){
+var node=_3b3.childNodes[i];
+if(node.nodeType==3&&!/\S/.test(node.nodeValue)){
+node.parentNode.removeChild(node);
+}
+}
+// getFirstElementByTagAndClassName(): first descendant of parent
+// matching the tag name (or "*") and optional class, else null.
+},getFirstElementByTagAndClassName:function(_3b6,_3b7,_3b8){
+var self=MochiKit.DOM;
+if(typeof (_3b6)=="undefined"||_3b6===null){
+_3b6="*";
+}
+if(typeof (_3b8)=="undefined"||_3b8===null){
+_3b8=self._document;
+}
+_3b8=self.getElement(_3b8);
+if(_3b8==null){
+return null;
+}
+var _3ba=(_3b8.getElementsByTagName(_3b6)||self._document.all);
+if(_3ba.length<=0){
+return null;
+}else{
+if(typeof (_3b7)=="undefined"||_3b7===null){
+return _3ba[0];
+}
+}
+for(var i=0;i<_3ba.length;i++){
+var _3bc=_3ba[i];
+var cls=_3bc.className;
+if(typeof (cls)!="string"){
+cls=_3bc.getAttribute("class");
+}
+if(typeof (cls)=="string"){
+var _3be=cls.split(" ");
+for(var j=0;j<_3be.length;j++){
+if(_3be[j]==_3b7){
+return _3bc;
+}
+}
+}
+}
+return null;
+// getFirstParentByTagAndClassName(): nearest ancestor matching the
+// tag name (or "*") and optional class, else null.
+},getFirstParentByTagAndClassName:function(elem,_3c1,_3c2){
+var self=MochiKit.DOM;
+elem=self.getElement(elem);
+if(typeof (_3c1)=="undefined"||_3c1===null){
+_3c1="*";
+}else{
+_3c1=_3c1.toUpperCase();
+}
+if(typeof (_3c2)=="undefined"||_3c2===null){
+_3c2=null;
+}
+if(elem){
+elem=elem.parentNode;
+}
+while(elem&&elem.tagName){
+var _3c4=elem.tagName.toUpperCase();
+if((_3c1==="*"||_3c1==_3c4)&&(_3c2===null||self.hasElementClass(elem,_3c2))){
+return elem;
+}
+elem=elem.parentNode;
+}
+return null;
+// __new__(): module initialisation -- bind window/document (real DOM
+// or MockDOM), feature-detect attribute handling, install deprecation
+// shims and the element-constructor shorthands (DIV, SPAN, ...).
+},__new__:function(win){
+var m=MochiKit.Base;
+if(typeof (document)!="undefined"){
+this._document=document;
+var _3c7="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
+this._xhtml=(document.documentElement&&document.createElementNS&&document.documentElement.namespaceURI===_3c7);
+}else{
+if(MochiKit.MockDOM){
+this._document=MochiKit.MockDOM.document;
+}
+}
+this._window=win;
+this.domConverters=new m.AdapterRegistry();
+// Feature-detect old-IE attribute behaviour: a fresh element that
+// already reports attributes exposes every default, which must then
+// be filtered out (ignoreAttr) and some names remapped (renames).
+var _3c8=this._document.createElement("span");
+var _3c9;
+if(_3c8&&_3c8.attributes&&_3c8.attributes.length>0){
+var _3ca=m.filter;
+_3c9=function(node){
+return _3ca(_3c9.ignoreAttrFilter,node.attributes);
+};
+_3c9.ignoreAttr={};
+var _3cc=_3c8.attributes;
+var _3cd=_3c9.ignoreAttr;
+for(var i=0;i<_3cc.length;i++){
+var a=_3cc[i];
+_3cd[a.name]=a.value;
+}
+_3c9.ignoreAttrFilter=function(a){
+return (_3c9.ignoreAttr[a.name]!=a.value);
+};
+_3c9.compliant=false;
+_3c9.renames={"class":"className","checked":"defaultChecked","usemap":"useMap","for":"htmlFor","readonly":"readOnly","colspan":"colSpan","bgcolor":"bgColor","cellspacing":"cellSpacing","cellpadding":"cellPadding"};
+}else{
+_3c9=function(node){
+return node.attributes;
+};
+_3c9.compliant=true;
+_3c9.ignoreAttr={};
+_3c9.renames={};
+}
+this.attributeArray=_3c9;
+// Replace each DEPRECATED alias with a generated stub that forwards
+// to its new home, or throws when that module is not loaded.
+var _3d2=function(_3d3,arr){
+var _3d5=arr[0];
+var _3d6=arr[1];
+var _3d7=_3d6.split(".")[1];
+var str="";
+str+="if (!MochiKit."+_3d7+") { throw new Error(\"";
+str+="This function has been deprecated and depends on MochiKit.";
+str+=_3d7+".\");}";
+str+="return "+_3d6+".apply(this, arguments);";
+MochiKit[_3d3][_3d5]=new Function(str);
+};
+for(var i=0;i<MochiKit.DOM.DEPRECATED.length;i++){
+_3d2("DOM",MochiKit.DOM.DEPRECATED[i]);
+}
+// Element-constructor shorthands built on createDOMFunc.
+var _3d9=this.createDOMFunc;
+this.UL=_3d9("ul");
+this.OL=_3d9("ol");
+this.LI=_3d9("li");
+this.DL=_3d9("dl");
+this.DT=_3d9("dt");
+this.DD=_3d9("dd");
+this.TD=_3d9("td");
+this.TR=_3d9("tr");
+this.TBODY=_3d9("tbody");
+this.THEAD=_3d9("thead");
+this.TFOOT=_3d9("tfoot");
+this.TABLE=_3d9("table");
+this.TH=_3d9("th");
+this.INPUT=_3d9("input");
+this.SPAN=_3d9("span");
+this.A=_3d9("a");
+this.DIV=_3d9("div");
+this.IMG=_3d9("img");
+this.BUTTON=_3d9("button");
+this.TT=_3d9("tt");
+this.PRE=_3d9("pre");
+this.H1=_3d9("h1");
+this.H2=_3d9("h2");
+this.H3=_3d9("h3");
+this.H4=_3d9("h4");
+this.H5=_3d9("h5");
+this.H6=_3d9("h6");
+this.BR=_3d9("br");
+this.HR=_3d9("hr");
+this.LABEL=_3d9("label");
+this.TEXTAREA=_3d9("textarea");
+this.FORM=_3d9("form");
+this.P=_3d9("p");
+this.SELECT=_3d9("select");
+this.OPTION=_3d9("option");
+this.OPTGROUP=_3d9("optgroup");
+this.LEGEND=_3d9("legend");
+this.FIELDSET=_3d9("fieldset");
+this.STRONG=_3d9("strong");
+this.CANVAS=_3d9("canvas");
+this.$=this.getElement;
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+}});
+MochiKit.DOM.__new__(((typeof (window)=="undefined")?this:window));
+if(MochiKit.__export__){
+withWindow=MochiKit.DOM.withWindow;
+withDocument=MochiKit.DOM.withDocument;
+}
+MochiKit.Base._exportSymbols(this,MochiKit.DOM);
+MochiKit.Base._deps("Selector",["Base","DOM","Iter"]);
+MochiKit.Selector.NAME="MochiKit.Selector";
+MochiKit.Selector.VERSION="1.4.2";
+MochiKit.Selector.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Selector.toString=function(){
+return this.__repr__();
+};
+MochiKit.Selector.EXPORT=["Selector","findChildElements","findDocElements","$$"];
+MochiKit.Selector.EXPORT_OK=[];
+MochiKit.Selector.Selector=function(_3da){
+this.params={classNames:[],pseudoClassNames:[]};
+this.expression=_3da.toString().replace(/(^\s+|\s+$)/g,"");
+this.parseExpression();
+this.compileMatcher();
+};
+MochiKit.Selector.Selector.prototype={__class__:MochiKit.Selector.Selector,parseExpression:function(){
+function abort(_3db){
+throw "Parse error in selector: "+_3db;
+}
+if(this.expression==""){
+abort("empty expression");
+}
+var repr=MochiKit.Base.repr;
+var _3dd=this.params;
+var expr=this.expression;
+var _3df,_3e0,_3e1,rest;
+while(_3df=expr.match(/^(.*)\[([a-z0-9_:-]+?)(?:([~\|!^$*]?=)(?:"([^"]*)"|([^\]\s]*)))?\]$/i)){
+_3dd.attributes=_3dd.attributes||[];
+_3dd.attributes.push({name:_3df[2],operator:_3df[3],value:_3df[4]||_3df[5]||""});
+expr=_3df[1];
+}
+if(expr=="*"){
+return this.params.wildcard=true;
+}
+while(_3df=expr.match(/^([^a-z0-9_-])?([a-z0-9_-]+(?:\([^)]*\))?)(.*)/i)){
+_3e0=_3df[1];
+_3e1=_3df[2];
+rest=_3df[3];
+switch(_3e0){
+case "#":
+_3dd.id=_3e1;
+break;
+case ".":
+_3dd.classNames.push(_3e1);
+break;
+case ":":
+_3dd.pseudoClassNames.push(_3e1);
+break;
+case "":
+case undefined:
+_3dd.tagName=_3e1.toUpperCase();
+break;
+default:
+abort(repr(expr));
+}
+expr=rest;
+}
+if(expr.length>0){
+abort(repr(expr));
+}
+},buildMatchExpression:function(){
+var repr=MochiKit.Base.repr;
+var _3e4=this.params;
+var _3e5=[];
+var _3e6,i;
+function childElements(_3e8){
+return "MochiKit.Base.filter(function (node) { return node.nodeType == 1; }, "+_3e8+".childNodes)";
+}
+if(_3e4.wildcard){
+_3e5.push("true");
+}
+if(_3e6=_3e4.id){
+_3e5.push("element.id == "+repr(_3e6));
+}
+if(_3e6=_3e4.tagName){
+_3e5.push("element.tagName.toUpperCase() == "+repr(_3e6));
+}
+if((_3e6=_3e4.classNames).length>0){
+for(i=0;i<_3e6.length;i++){
+_3e5.push("MochiKit.DOM.hasElementClass(element, "+repr(_3e6[i])+")");
+}
+}
+if((_3e6=_3e4.pseudoClassNames).length>0){
+for(i=0;i<_3e6.length;i++){
+var _3e9=_3e6[i].match(/^([^(]+)(?:\((.*)\))?$/);
+var _3ea=_3e9[1];
+var _3eb=_3e9[2];
+switch(_3ea){
+case "root":
+_3e5.push("element.nodeType == 9 || element === element.ownerDocument.documentElement");
+break;
+case "nth-child":
+case "nth-last-child":
+case "nth-of-type":
+case "nth-last-of-type":
+_3e9=_3eb.match(/^((?:(\d+)n\+)?(\d+)|odd|even)$/);
+if(!_3e9){
+throw "Invalid argument to pseudo element nth-child: "+_3eb;
+}
+var a,b;
+if(_3e9[0]=="odd"){
+a=2;
+b=1;
+}else{
+if(_3e9[0]=="even"){
+a=2;
+b=0;
+}else{
+a=_3e9[2]&&parseInt(_3e9)||null;
+b=parseInt(_3e9[3]);
+}
+}
+_3e5.push("this.nthChild(element,"+a+","+b+","+!!_3ea.match("^nth-last")+","+!!_3ea.match("of-type$")+")");
+break;
+case "first-child":
+_3e5.push("this.nthChild(element, null, 1)");
+break;
+case "last-child":
+_3e5.push("this.nthChild(element, null, 1, true)");
+break;
+case "first-of-type":
+_3e5.push("this.nthChild(element, null, 1, false, true)");
+break;
+case "last-of-type":
+_3e5.push("this.nthChild(element, null, 1, true, true)");
+break;
+case "only-child":
+_3e5.push(childElements("element.parentNode")+".length == 1");
+break;
+case "only-of-type":
+_3e5.push("MochiKit.Base.filter(function (node) { return node.tagName == element.tagName; }, "+childElements("element.parentNode")+").length == 1");
+break;
+case "empty":
+_3e5.push("element.childNodes.length == 0");
+break;
+case "enabled":
+_3e5.push("(this.isUIElement(element) && element.disabled === false)");
+break;
+case "disabled":
+_3e5.push("(this.isUIElement(element) && element.disabled === true)");
+break;
+case "checked":
+_3e5.push("(this.isUIElement(element) && element.checked === true)");
+break;
+case "not":
+var _3ee=new MochiKit.Selector.Selector(_3eb);
+_3e5.push("!( "+_3ee.buildMatchExpression()+")");
+break;
+}
+}
+}
+if(_3e6=_3e4.attributes){
+MochiKit.Base.map(function(_3ef){
+var _3f0="MochiKit.DOM.getNodeAttribute(element, "+repr(_3ef.name)+")";
+var _3f1=function(_3f2){
+return _3f0+".split("+repr(_3f2)+")";
+};
+_3e5.push(_3f0+" != null");
+switch(_3ef.operator){
+case "=":
+_3e5.push(_3f0+" == "+repr(_3ef.value));
+break;
+case "~=":
+_3e5.push("MochiKit.Base.findValue("+_3f1(" ")+", "+repr(_3ef.value)+") > -1");
+break;
+case "^=":
+_3e5.push(_3f0+".substring(0, "+_3ef.value.length+") == "+repr(_3ef.value));
+break;
+case "$=":
+_3e5.push(_3f0+".substring("+_3f0+".length - "+_3ef.value.length+") == "+repr(_3ef.value));
+break;
+case "*=":
+_3e5.push(_3f0+".match("+repr(_3ef.value)+")");
+break;
+case "|=":
+_3e5.push(_3f1("-")+"[0].toUpperCase() == "+repr(_3ef.value.toUpperCase()));
+break;
+case "!=":
+_3e5.push(_3f0+" != "+repr(_3ef.value));
+break;
+case "":
+case undefined:
+break;
+default:
+throw "Unknown operator "+_3ef.operator+" in selector";
+}
+},_3e6);
+}
+return _3e5.join(" && ");
+},compileMatcher:function(){
+var code="return (!element.tagName) ? false : "+this.buildMatchExpression()+";";
+this.match=new Function("element",code);
+},nthChild:function(_3f4,a,b,_3f7,_3f8){
+var _3f9=MochiKit.Base.filter(function(node){
+return node.nodeType==1;
+},_3f4.parentNode.childNodes);
+if(_3f8){
+_3f9=MochiKit.Base.filter(function(node){
+return node.tagName==_3f4.tagName;
+},_3f9);
+}
+if(_3f7){
+_3f9=MochiKit.Iter.reversed(_3f9);
+}
+if(a){
+var _3fc=MochiKit.Base.findIdentical(_3f9,_3f4);
+return ((_3fc+1-b)/a)%1==0;
+}else{
+return b==MochiKit.Base.findIdentical(_3f9,_3f4)+1;
+}
+},isUIElement:function(_3fd){
+return MochiKit.Base.findValue(["input","button","select","option","textarea","object"],_3fd.tagName.toLowerCase())>-1;
+},findElements:function(_3fe,axis){
+var _400;
+if(axis==undefined){
+axis="";
+}
+function inScope(_401,_402){
+if(axis==""){
+return MochiKit.DOM.isChildNode(_401,_402);
+}else{
+if(axis==">"){
+return _401.parentNode===_402;
+}else{
+if(axis=="+"){
+return _401===nextSiblingElement(_402);
+}else{
+if(axis=="~"){
+var _403=_402;
+while(_403=nextSiblingElement(_403)){
+if(_401===_403){
+return true;
+}
+}
+return false;
+}else{
+throw "Invalid axis: "+axis;
+}
+}
+}
+}
+}
+if(_400=MochiKit.DOM.getElement(this.params.id)){
+if(this.match(_400)){
+if(!_3fe||inScope(_400,_3fe)){
+return [_400];
+}
+}
+}
+function nextSiblingElement(node){
+node=node.nextSibling;
+while(node&&node.nodeType!=1){
+node=node.nextSibling;
+}
+return node;
+}
+if(axis==""){
+_3fe=(_3fe||MochiKit.DOM.currentDocument()).getElementsByTagName(this.params.tagName||"*");
+}else{
+if(axis==">"){
+if(!_3fe){
+throw "> combinator not allowed without preceeding expression";
+}
+_3fe=MochiKit.Base.filter(function(node){
+return node.nodeType==1;
+},_3fe.childNodes);
+}else{
+if(axis=="+"){
+if(!_3fe){
+throw "+ combinator not allowed without preceeding expression";
+}
+_3fe=nextSiblingElement(_3fe)&&[nextSiblingElement(_3fe)];
+}else{
+if(axis=="~"){
+if(!_3fe){
+throw "~ combinator not allowed without preceeding expression";
+}
+var _406=[];
+while(nextSiblingElement(_3fe)){
+_3fe=nextSiblingElement(_3fe);
+_406.push(_3fe);
+}
+_3fe=_406;
+}
+}
+}
+}
+if(!_3fe){
+return [];
+}
+var _407=MochiKit.Base.filter(MochiKit.Base.bind(function(_408){
+return this.match(_408);
+},this),_3fe);
+return _407;
+},repr:function(){
+return "Selector("+this.expression+")";
+},toString:MochiKit.Base.forwardCall("repr")};
+MochiKit.Base.update(MochiKit.Selector,{findChildElements:function(_409,_40a){
+var uniq=function(arr){
+var res=[];
+for(var i=0;i<arr.length;i++){
+if(MochiKit.Base.findIdentical(res,arr[i])<0){
+res.push(arr[i]);
+}
+}
+return res;
+};
+return MochiKit.Base.flattenArray(MochiKit.Base.map(function(_40f){
+var _410="";
+var _411=function(_412,expr){
+if(match=expr.match(/^[>+~]$/)){
+_410=match[0];
+return _412;
+}else{
+var _414=new MochiKit.Selector.Selector(expr);
+var _415=MochiKit.Iter.reduce(function(_416,_417){
+return MochiKit.Base.extend(_416,_414.findElements(_417||_409,_410));
+},_412,[]);
+_410="";
+return _415;
+}
+};
+var _418=_40f.replace(/(^\s+|\s+$)/g,"").split(/\s+/);
+return uniq(MochiKit.Iter.reduce(_411,_418,[null]));
+},_40a));
+},findDocElements:function(){
+return MochiKit.Selector.findChildElements(MochiKit.DOM.currentDocument(),arguments);
+},__new__:function(){
+var m=MochiKit.Base;
+this.$$=this.findDocElements;
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+}});
+MochiKit.Selector.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.Selector);
+MochiKit.Base._deps("Style",["Base","DOM"]);
+MochiKit.Style.NAME="MochiKit.Style";
+MochiKit.Style.VERSION="1.4.2";
+MochiKit.Style.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Style.toString=function(){
+return this.__repr__();
+};
+MochiKit.Style.EXPORT_OK=[];
+MochiKit.Style.EXPORT=["setStyle","setOpacity","getStyle","getElementDimensions","elementDimensions","setElementDimensions","getElementPosition","elementPosition","setElementPosition","makePositioned","undoPositioned","makeClipping","undoClipping","setDisplayForElement","hideElement","showElement","getViewportDimensions","getViewportPosition","Dimensions","Coordinates"];
+MochiKit.Style.Dimensions=function(w,h){
+this.w=w;
+this.h=h;
+};
+MochiKit.Style.Dimensions.prototype.__repr__=function(){
+var repr=MochiKit.Base.repr;
+return "{w: "+repr(this.w)+", h: "+repr(this.h)+"}";
+};
+MochiKit.Style.Dimensions.prototype.toString=function(){
+return this.__repr__();
+};
+MochiKit.Style.Coordinates=function(x,y){
+this.x=x;
+this.y=y;
+};
+MochiKit.Style.Coordinates.prototype.__repr__=function(){
+var repr=MochiKit.Base.repr;
+return "{x: "+repr(this.x)+", y: "+repr(this.y)+"}";
+};
+MochiKit.Style.Coordinates.prototype.toString=function(){
+return this.__repr__();
+};
+MochiKit.Base.update(MochiKit.Style,{getStyle:function(elem,_421){
+var dom=MochiKit.DOM;
+var d=dom._document;
+elem=dom.getElement(elem);
+_421=MochiKit.Base.camelize(_421);
+if(!elem||elem==d){
+return undefined;
+}
+if(_421=="opacity"&&typeof (elem.filters)!="undefined"){
+var _424=(MochiKit.Style.getStyle(elem,"filter")||"").match(/alpha\(opacity=(.*)\)/);
+if(_424&&_424[1]){
+return parseFloat(_424[1])/100;
+}
+return 1;
+}
+if(_421=="float"||_421=="cssFloat"||_421=="styleFloat"){
+if(elem.style["float"]){
+return elem.style["float"];
+}else{
+if(elem.style.cssFloat){
+return elem.style.cssFloat;
+}else{
+if(elem.style.styleFloat){
+return elem.style.styleFloat;
+}else{
+return "none";
+}
+}
+}
+}
+var _425=elem.style?elem.style[_421]:null;
+if(!_425){
+if(d.defaultView&&d.defaultView.getComputedStyle){
+var css=d.defaultView.getComputedStyle(elem,null);
+_421=_421.replace(/([A-Z])/g,"-$1").toLowerCase();
+_425=css?css.getPropertyValue(_421):null;
+}else{
+if(elem.currentStyle){
+_425=elem.currentStyle[_421];
+if(/^\d/.test(_425)&&!/px$/.test(_425)&&_421!="fontWeight"){
+var left=elem.style.left;
+var _428=elem.runtimeStyle.left;
+elem.runtimeStyle.left=elem.currentStyle.left;
+elem.style.left=_425||0;
+_425=elem.style.pixelLeft+"px";
+elem.style.left=left;
+elem.runtimeStyle.left=_428;
+}
+}
+}
+}
+if(_421=="opacity"){
+_425=parseFloat(_425);
+}
+if(/Opera/.test(navigator.userAgent)&&(MochiKit.Base.findValue(["left","top","right","bottom"],_421)!=-1)){
+if(MochiKit.Style.getStyle(elem,"position")=="static"){
+_425="auto";
+}
+}
+return _425=="auto"?null:_425;
+},setStyle:function(elem,_42a){
+elem=MochiKit.DOM.getElement(elem);
+for(var name in _42a){
+switch(name){
+case "opacity":
+MochiKit.Style.setOpacity(elem,_42a[name]);
+break;
+case "float":
+case "cssFloat":
+case "styleFloat":
+if(typeof (elem.style["float"])!="undefined"){
+elem.style["float"]=_42a[name];
+}else{
+if(typeof (elem.style.cssFloat)!="undefined"){
+elem.style.cssFloat=_42a[name];
+}else{
+elem.style.styleFloat=_42a[name];
+}
+}
+break;
+default:
+elem.style[MochiKit.Base.camelize(name)]=_42a[name];
+}
+}
+},setOpacity:function(elem,o){
+elem=MochiKit.DOM.getElement(elem);
+var self=MochiKit.Style;
+if(o==1){
+var _42f=/Gecko/.test(navigator.userAgent)&&!(/Konqueror|AppleWebKit|KHTML/.test(navigator.userAgent));
+elem.style["opacity"]=_42f?0.999999:1;
+if(/MSIE/.test(navigator.userAgent)){
+elem.style["filter"]=self.getStyle(elem,"filter").replace(/alpha\([^\)]*\)/gi,"");
+}
+}else{
+if(o<0.00001){
+o=0;
+}
+elem.style["opacity"]=o;
+if(/MSIE/.test(navigator.userAgent)){
+elem.style["filter"]=self.getStyle(elem,"filter").replace(/alpha\([^\)]*\)/gi,"")+"alpha(opacity="+o*100+")";
+}
+}
+},getElementPosition:function(elem,_431){
+var self=MochiKit.Style;
+var dom=MochiKit.DOM;
+elem=dom.getElement(elem);
+if(!elem||(!(elem.x&&elem.y)&&(!elem.parentNode===null||self.getStyle(elem,"display")=="none"))){
+return undefined;
+}
+var c=new self.Coordinates(0,0);
+var box=null;
+var _436=null;
+var d=MochiKit.DOM._document;
+var de=d.documentElement;
+var b=d.body;
+if(!elem.parentNode&&elem.x&&elem.y){
+c.x+=elem.x||0;
+c.y+=elem.y||0;
+}else{
+if(elem.getBoundingClientRect){
+box=elem.getBoundingClientRect();
+c.x+=box.left+(de.scrollLeft||b.scrollLeft)-(de.clientLeft||0);
+c.y+=box.top+(de.scrollTop||b.scrollTop)-(de.clientTop||0);
+}else{
+if(elem.offsetParent){
+c.x+=elem.offsetLeft;
+c.y+=elem.offsetTop;
+_436=elem.offsetParent;
+if(_436!=elem){
+while(_436){
+c.x+=parseInt(_436.style.borderLeftWidth)||0;
+c.y+=parseInt(_436.style.borderTopWidth)||0;
+c.x+=_436.offsetLeft;
+c.y+=_436.offsetTop;
+_436=_436.offsetParent;
+}
+}
+var ua=navigator.userAgent.toLowerCase();
+if((typeof (opera)!="undefined"&&parseFloat(opera.version())<9)||(ua.indexOf("AppleWebKit")!=-1&&self.getStyle(elem,"position")=="absolute")){
+c.x-=b.offsetLeft;
+c.y-=b.offsetTop;
+}
+if(elem.parentNode){
+_436=elem.parentNode;
+}else{
+_436=null;
+}
+while(_436){
+var _43b=_436.tagName.toUpperCase();
+if(_43b==="BODY"||_43b==="HTML"){
+break;
+}
+var disp=self.getStyle(_436,"display");
+if(disp.search(/^inline|table-row.*$/i)){
+c.x-=_436.scrollLeft;
+c.y-=_436.scrollTop;
+}
+if(_436.parentNode){
+_436=_436.parentNode;
+}else{
+_436=null;
+}
+}
+}
+}
+}
+if(typeof (_431)!="undefined"){
+_431=arguments.callee(_431);
+if(_431){
+c.x-=(_431.x||0);
+c.y-=(_431.y||0);
+}
+}
+return c;
+},setElementPosition:function(elem,_43e,_43f){
+elem=MochiKit.DOM.getElement(elem);
+if(typeof (_43f)=="undefined"){
+_43f="px";
+}
+var _440={};
+var _441=MochiKit.Base.isUndefinedOrNull;
+if(!_441(_43e.x)){
+_440["left"]=_43e.x+_43f;
+}
+if(!_441(_43e.y)){
+_440["top"]=_43e.y+_43f;
+}
+MochiKit.DOM.updateNodeAttributes(elem,{"style":_440});
+},makePositioned:function(_442){
+_442=MochiKit.DOM.getElement(_442);
+var pos=MochiKit.Style.getStyle(_442,"position");
+if(pos=="static"||!pos){
+_442.style.position="relative";
+if(/Opera/.test(navigator.userAgent)){
+_442.style.top=0;
+_442.style.left=0;
+}
+}
+},undoPositioned:function(_444){
+_444=MochiKit.DOM.getElement(_444);
+if(_444.style.position=="relative"){
+_444.style.position=_444.style.top=_444.style.left=_444.style.bottom=_444.style.right="";
+}
+},makeClipping:function(_445){
+_445=MochiKit.DOM.getElement(_445);
+var s=_445.style;
+var _447={"overflow":s.overflow,"overflow-x":s.overflowX,"overflow-y":s.overflowY};
+if((MochiKit.Style.getStyle(_445,"overflow")||"visible")!="hidden"){
+_445.style.overflow="hidden";
+_445.style.overflowX="hidden";
+_445.style.overflowY="hidden";
+}
+return _447;
+},undoClipping:function(_448,_449){
+_448=MochiKit.DOM.getElement(_448);
+if(typeof (_449)=="string"){
+_448.style.overflow=_449;
+}else{
+if(_449!=null){
+_448.style.overflow=_449["overflow"];
+_448.style.overflowX=_449["overflow-x"];
+_448.style.overflowY=_449["overflow-y"];
+}
+}
+},getElementDimensions:function(elem,_44b){
+var self=MochiKit.Style;
+var dom=MochiKit.DOM;
+if(typeof (elem.w)=="number"||typeof (elem.h)=="number"){
+return new self.Dimensions(elem.w||0,elem.h||0);
+}
+elem=dom.getElement(elem);
+if(!elem){
+return undefined;
+}
+var disp=self.getStyle(elem,"display");
+if(disp=="none"||disp==""||typeof (disp)=="undefined"){
+var s=elem.style;
+var _450=s.visibility;
+var _451=s.position;
+var _452=s.display;
+s.visibility="hidden";
+s.position="absolute";
+s.display=self._getDefaultDisplay(elem);
+var _453=elem.offsetWidth;
+var _454=elem.offsetHeight;
+s.display=_452;
+s.position=_451;
+s.visibility=_450;
+}else{
+_453=elem.offsetWidth||0;
+_454=elem.offsetHeight||0;
+}
+if(_44b){
+var _455="colSpan" in elem&&"rowSpan" in elem;
+var _456=(_455&&elem.parentNode&&self.getStyle(elem.parentNode,"borderCollapse")=="collapse");
+if(_456){
+if(/MSIE/.test(navigator.userAgent)){
+var _457=elem.previousSibling?0.5:1;
+var _458=elem.nextSibling?0.5:1;
+}else{
+var _457=0.5;
+var _458=0.5;
+}
+}else{
+var _457=1;
+var _458=1;
+}
+_453-=Math.round((parseFloat(self.getStyle(elem,"paddingLeft"))||0)+(parseFloat(self.getStyle(elem,"paddingRight"))||0)+_457*(parseFloat(self.getStyle(elem,"borderLeftWidth"))||0)+_458*(parseFloat(self.getStyle(elem,"borderRightWidth"))||0));
+if(_455){
+if(/Gecko|Opera/.test(navigator.userAgent)&&!/Konqueror|AppleWebKit|KHTML/.test(navigator.userAgent)){
+var _459=0;
+}else{
+if(/MSIE/.test(navigator.userAgent)){
+var _459=1;
+}else{
+var _459=_456?0.5:1;
+}
+}
+}else{
+var _459=1;
+}
+_454-=Math.round((parseFloat(self.getStyle(elem,"paddingTop"))||0)+(parseFloat(self.getStyle(elem,"paddingBottom"))||0)+_459*((parseFloat(self.getStyle(elem,"borderTopWidth"))||0)+(parseFloat(self.getStyle(elem,"borderBottomWidth"))||0)));
+}
+return new self.Dimensions(_453,_454);
+},setElementDimensions:function(elem,_45b,_45c){
+elem=MochiKit.DOM.getElement(elem);
+if(typeof (_45c)=="undefined"){
+_45c="px";
+}
+var _45d={};
+var _45e=MochiKit.Base.isUndefinedOrNull;
+if(!_45e(_45b.w)){
+_45d["width"]=_45b.w+_45c;
+}
+if(!_45e(_45b.h)){
+_45d["height"]=_45b.h+_45c;
+}
+MochiKit.DOM.updateNodeAttributes(elem,{"style":_45d});
+},_getDefaultDisplay:function(elem){
+var self=MochiKit.Style;
+var dom=MochiKit.DOM;
+elem=dom.getElement(elem);
+if(!elem){
+return undefined;
+}
+var _462=elem.tagName.toUpperCase();
+return self._defaultDisplay[_462]||"block";
+},setDisplayForElement:function(_463,_464){
+var _465=MochiKit.Base.extend(null,arguments,1);
+var _466=MochiKit.DOM.getElement;
+for(var i=0;i<_465.length;i++){
+_464=_466(_465[i]);
+if(_464){
+_464.style.display=_463;
+}
+}
+},getViewportDimensions:function(){
+var d=new MochiKit.Style.Dimensions();
+var w=MochiKit.DOM._window;
+var b=MochiKit.DOM._document.body;
+if(w.innerWidth){
+d.w=w.innerWidth;
+d.h=w.innerHeight;
+}else{
+if(b&&b.parentElement&&b.parentElement.clientWidth){
+d.w=b.parentElement.clientWidth;
+d.h=b.parentElement.clientHeight;
+}else{
+if(b&&b.clientWidth){
+d.w=b.clientWidth;
+d.h=b.clientHeight;
+}
+}
+}
+return d;
+},getViewportPosition:function(){
+var c=new MochiKit.Style.Coordinates(0,0);
+var d=MochiKit.DOM._document;
+var de=d.documentElement;
+var db=d.body;
+if(de&&(de.scrollTop||de.scrollLeft)){
+c.x=de.scrollLeft;
+c.y=de.scrollTop;
+}else{
+if(db){
+c.x=db.scrollLeft;
+c.y=db.scrollTop;
+}
+}
+return c;
+},__new__:function(){
+var m=MochiKit.Base;
+var _470=["A","ABBR","ACRONYM","B","BASEFONT","BDO","BIG","BR","CITE","CODE","DFN","EM","FONT","I","IMG","KBD","LABEL","Q","S","SAMP","SMALL","SPAN","STRIKE","STRONG","SUB","SUP","TEXTAREA","TT","U","VAR"];
+this._defaultDisplay={"TABLE":"table","THEAD":"table-header-group","TBODY":"table-row-group","TFOOT":"table-footer-group","COLGROUP":"table-column-group","COL":"table-column","TR":"table-row","TD":"table-cell","TH":"table-cell","CAPTION":"table-caption","LI":"list-item","INPUT":"inline-block","SELECT":"inline-block"};
+if(/MSIE/.test(navigator.userAgent)){
+for(var k in this._defaultDisplay){
+var v=this._defaultDisplay[k];
+if(v.indexOf("table")==0){
+this._defaultDisplay[k]="block";
+}
+}
+}
+for(var i=0;i<_470.length;i++){
+this._defaultDisplay[_470[i]]="inline";
+}
+this.elementPosition=this.getElementPosition;
+this.elementDimensions=this.getElementDimensions;
+this.hideElement=m.partial(this.setDisplayForElement,"none");
+this.showElement=m.partial(this.setDisplayForElement,"block");
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+}});
+MochiKit.Style.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.Style);
+MochiKit.Base._deps("LoggingPane",["Base","Logging"]);
+MochiKit.LoggingPane.NAME="MochiKit.LoggingPane";
+MochiKit.LoggingPane.VERSION="1.4.2";
+MochiKit.LoggingPane.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.LoggingPane.toString=function(){
+return this.__repr__();
+};
+MochiKit.LoggingPane.createLoggingPane=function(_474){
+var m=MochiKit.LoggingPane;
+_474=!(!_474);
+if(m._loggingPane&&m._loggingPane.inline!=_474){
+m._loggingPane.closePane();
+m._loggingPane=null;
+}
+if(!m._loggingPane||m._loggingPane.closed){
+m._loggingPane=new m.LoggingPane(_474,MochiKit.Logging.logger);
+}
+return m._loggingPane;
+};
+MochiKit.LoggingPane.LoggingPane=function(_476,_477){
+if(typeof (_477)=="undefined"||_477===null){
+_477=MochiKit.Logging.logger;
+}
+this.logger=_477;
+var _478=MochiKit.Base.update;
+var _479=MochiKit.Base.updatetree;
+var bind=MochiKit.Base.bind;
+var _47b=MochiKit.Base.clone;
+var win=window;
+var uid="_MochiKit_LoggingPane";
+if(typeof (MochiKit.DOM)!="undefined"){
+win=MochiKit.DOM.currentWindow();
+}
+if(!_476){
+var url=win.location.href.split("?")[0].replace(/[#:\/.><&%-]/g,"_");
+var name=uid+"_"+url;
+var nwin=win.open("",name,"dependent,resizable,height=200");
+if(!nwin){
+alert("Not able to open debugging window due to pop-up blocking.");
+return undefined;
+}
+nwin.document.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\" "+"\"http://www.w3.org/TR/html4/loose.dtd\">"+"<html><head><title>[MochiKit.LoggingPane]</title></head>"+"<body></body></html>");
+nwin.document.close();
+nwin.document.title+=" "+win.document.title;
+win=nwin;
+}
+var doc=win.document;
+this.doc=doc;
+var _482=doc.getElementById(uid);
+var _483=!!_482;
+if(_482&&typeof (_482.loggingPane)!="undefined"){
+_482.loggingPane.logger=this.logger;
+_482.loggingPane.buildAndApplyFilter();
+return _482.loggingPane;
+}
+if(_483){
+var _484;
+while((_484=_482.firstChild)){
+_482.removeChild(_484);
+}
+}else{
+_482=doc.createElement("div");
+_482.id=uid;
+}
+_482.loggingPane=this;
+var _485=doc.createElement("input");
+var _486=doc.createElement("input");
+var _487=doc.createElement("button");
+var _488=doc.createElement("button");
+var _489=doc.createElement("button");
+var _48a=doc.createElement("button");
+var _48b=doc.createElement("div");
+var _48c=doc.createElement("div");
+var _48d=uid+"_Listener";
+this.colorTable=_47b(this.colorTable);
+var _48e=[];
+var _48f=null;
+var _490=function(msg){
+var _492=msg.level;
+if(typeof (_492)=="number"){
+_492=MochiKit.Logging.LogLevel[_492];
+}
+return _492;
+};
+var _493=function(msg){
+return msg.info.join(" ");
+};
+var _495=bind(function(msg){
+var _497=_490(msg);
+var text=_493(msg);
+var c=this.colorTable[_497];
+var p=doc.createElement("span");
+p.className="MochiKit-LogMessage MochiKit-LogLevel-"+_497;
+p.style.cssText="margin: 0px; white-space: -moz-pre-wrap; white-space: -o-pre-wrap; white-space: pre-wrap; white-space: pre-line; word-wrap: break-word; wrap-option: emergency; color: "+c;
+p.appendChild(doc.createTextNode(_497+": "+text));
+_48c.appendChild(p);
+_48c.appendChild(doc.createElement("br"));
+if(_48b.offsetHeight>_48b.scrollHeight){
+_48b.scrollTop=0;
+}else{
+_48b.scrollTop=_48b.scrollHeight;
+}
+},this);
+var _49b=function(msg){
+_48e[_48e.length]=msg;
+_495(msg);
+};
+var _49d=function(){
+var _49e,_49f;
+try{
+_49e=new RegExp(_485.value);
+_49f=new RegExp(_486.value);
+}
+catch(e){
+logDebug("Error in filter regex: "+e.message);
+return null;
+}
+return function(msg){
+return (_49e.test(_490(msg))&&_49f.test(_493(msg)));
+};
+};
+var _4a1=function(){
+while(_48c.firstChild){
+_48c.removeChild(_48c.firstChild);
+}
+};
+var _4a2=function(){
+_48e=[];
+_4a1();
+};
+var _4a3=bind(function(){
+if(this.closed){
+return;
+}
+this.closed=true;
+if(MochiKit.LoggingPane._loggingPane==this){
+MochiKit.LoggingPane._loggingPane=null;
+}
+this.logger.removeListener(_48d);
+try{
+try{
+_482.loggingPane=null;
+}
+catch(e){
+logFatal("Bookmarklet was closed incorrectly.");
+}
+if(_476){
+_482.parentNode.removeChild(_482);
+}else{
+this.win.close();
+}
+}
+catch(e){
+}
+},this);
+var _4a4=function(){
+_4a1();
+for(var i=0;i<_48e.length;i++){
+var msg=_48e[i];
+if(_48f===null||_48f(msg)){
+_495(msg);
+}
+}
+};
+this.buildAndApplyFilter=function(){
+_48f=_49d();
+_4a4();
+this.logger.removeListener(_48d);
+this.logger.addListener(_48d,_48f,_49b);
+};
+var _4a7=bind(function(){
+_48e=this.logger.getMessages();
+_4a4();
+},this);
+var _4a8=bind(function(_4a9){
+_4a9=_4a9||window.event;
+key=_4a9.which||_4a9.keyCode;
+if(key==13){
+this.buildAndApplyFilter();
+}
+},this);
+var _4aa="display: block; z-index: 1000; left: 0px; bottom: 0px; position: fixed; width: 100%; background-color: white; font: "+this.logFont;
+if(_476){
+_4aa+="; height: 10em; border-top: 2px solid black";
+}else{
+_4aa+="; height: 100%;";
+}
+_482.style.cssText=_4aa;
+if(!_483){
+doc.body.appendChild(_482);
+}
+_4aa={"cssText":"width: 33%; display: inline; font: "+this.logFont};
+_479(_485,{"value":"FATAL|ERROR|WARNING|INFO|DEBUG","onkeypress":_4a8,"style":_4aa});
+_482.appendChild(_485);
+_479(_486,{"value":".*","onkeypress":_4a8,"style":_4aa});
+_482.appendChild(_486);
+_4aa="width: 8%; display:inline; font: "+this.logFont;
+_487.appendChild(doc.createTextNode("Filter"));
+_487.onclick=bind("buildAndApplyFilter",this);
+_487.style.cssText=_4aa;
+_482.appendChild(_487);
+_488.appendChild(doc.createTextNode("Load"));
+_488.onclick=_4a7;
+_488.style.cssText=_4aa;
+_482.appendChild(_488);
+_489.appendChild(doc.createTextNode("Clear"));
+_489.onclick=_4a2;
+_489.style.cssText=_4aa;
+_482.appendChild(_489);
+_48a.appendChild(doc.createTextNode("Close"));
+_48a.onclick=_4a3;
+_48a.style.cssText=_4aa;
+_482.appendChild(_48a);
+_48b.style.cssText="overflow: auto; width: 100%";
+_48c.style.cssText="width: 100%; height: "+(_476?"8em":"100%");
+_48b.appendChild(_48c);
+_482.appendChild(_48b);
+this.buildAndApplyFilter();
+_4a7();
+if(_476){
+this.win=undefined;
+}else{
+this.win=win;
+}
+this.inline=_476;
+this.closePane=_4a3;
+this.closed=false;
+return this;
+};
+MochiKit.LoggingPane.LoggingPane.prototype={"logFont":"8pt Verdana,sans-serif","colorTable":{"ERROR":"red","FATAL":"darkred","WARNING":"blue","INFO":"black","DEBUG":"green"}};
+MochiKit.LoggingPane.EXPORT_OK=["LoggingPane"];
+MochiKit.LoggingPane.EXPORT=["createLoggingPane"];
+MochiKit.LoggingPane.__new__=function(){
+this.EXPORT_TAGS={":common":this.EXPORT,":all":MochiKit.Base.concat(this.EXPORT,this.EXPORT_OK)};
+MochiKit.Base.nameFunctions(this);
+MochiKit.LoggingPane._loggingPane=null;
+};
+MochiKit.LoggingPane.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.LoggingPane);
+MochiKit.Base._deps("Color",["Base","DOM","Style"]);
+MochiKit.Color.NAME="MochiKit.Color";
+MochiKit.Color.VERSION="1.4.2";
+MochiKit.Color.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Color.toString=function(){
+return this.__repr__();
+};
+MochiKit.Color.Color=function(red,_4ac,blue,_4ae){
+if(typeof (_4ae)=="undefined"||_4ae===null){
+_4ae=1;
+}
+this.rgb={r:red,g:_4ac,b:blue,a:_4ae};
+};
+MochiKit.Color.Color.prototype={__class__:MochiKit.Color.Color,colorWithAlpha:function(_4af){
+var rgb=this.rgb;
+var m=MochiKit.Color;
+return m.Color.fromRGB(rgb.r,rgb.g,rgb.b,_4af);
+},colorWithHue:function(hue){
+var hsl=this.asHSL();
+hsl.h=hue;
+var m=MochiKit.Color;
+return m.Color.fromHSL(hsl);
+},colorWithSaturation:function(_4b5){
+var hsl=this.asHSL();
+hsl.s=_4b5;
+var m=MochiKit.Color;
+return m.Color.fromHSL(hsl);
+},colorWithLightness:function(_4b8){
+var hsl=this.asHSL();
+hsl.l=_4b8;
+var m=MochiKit.Color;
+return m.Color.fromHSL(hsl);
+},darkerColorWithLevel:function(_4bb){
+var hsl=this.asHSL();
+hsl.l=Math.max(hsl.l-_4bb,0);
+var m=MochiKit.Color;
+return m.Color.fromHSL(hsl);
+},lighterColorWithLevel:function(_4be){
+var hsl=this.asHSL();
+hsl.l=Math.min(hsl.l+_4be,1);
+var m=MochiKit.Color;
+return m.Color.fromHSL(hsl);
+},blendedColor:function(_4c1,_4c2){
+if(typeof (_4c2)=="undefined"||_4c2===null){
+_4c2=0.5;
+}
+var sf=1-_4c2;
+var s=this.rgb;
+var d=_4c1.rgb;
+var df=_4c2;
+return MochiKit.Color.Color.fromRGB((s.r*sf)+(d.r*df),(s.g*sf)+(d.g*df),(s.b*sf)+(d.b*df),(s.a*sf)+(d.a*df));
+},compareRGB:function(_4c7){
+var a=this.asRGB();
+var b=_4c7.asRGB();
+return MochiKit.Base.compare([a.r,a.g,a.b,a.a],[b.r,b.g,b.b,b.a]);
+},isLight:function(){
+return this.asHSL().b>0.5;
+},isDark:function(){
+return (!this.isLight());
+},toHSLString:function(){
+var c=this.asHSL();
+var ccc=MochiKit.Color.clampColorComponent;
+var rval=this._hslString;
+if(!rval){
+var mid=(ccc(c.h,360).toFixed(0)+","+ccc(c.s,100).toPrecision(4)+"%"+","+ccc(c.l,100).toPrecision(4)+"%");
+var a=c.a;
+if(a>=1){
+a=1;
+rval="hsl("+mid+")";
+}else{
+if(a<=0){
+a=0;
+}
+rval="hsla("+mid+","+a+")";
+}
+this._hslString=rval;
+}
+return rval;
+},toRGBString:function(){
+var c=this.rgb;
+var ccc=MochiKit.Color.clampColorComponent;
+var rval=this._rgbString;
+if(!rval){
+var mid=(ccc(c.r,255).toFixed(0)+","+ccc(c.g,255).toFixed(0)+","+ccc(c.b,255).toFixed(0));
+if(c.a!=1){
+rval="rgba("+mid+","+c.a+")";
+}else{
+rval="rgb("+mid+")";
+}
+this._rgbString=rval;
+}
+return rval;
+},asRGB:function(){
+return MochiKit.Base.clone(this.rgb);
+},toHexString:function(){
+var m=MochiKit.Color;
+var c=this.rgb;
+var ccc=MochiKit.Color.clampColorComponent;
+var rval=this._hexString;
+if(!rval){
+rval=("#"+m.toColorPart(ccc(c.r,255))+m.toColorPart(ccc(c.g,255))+m.toColorPart(ccc(c.b,255)));
+this._hexString=rval;
+}
+return rval;
+},asHSV:function(){
+var hsv=this.hsv;
+var c=this.rgb;
+if(typeof (hsv)=="undefined"||hsv===null){
+hsv=MochiKit.Color.rgbToHSV(this.rgb);
+this.hsv=hsv;
+}
+return MochiKit.Base.clone(hsv);
+},asHSL:function(){
+var hsl=this.hsl;
+var c=this.rgb;
+if(typeof (hsl)=="undefined"||hsl===null){
+hsl=MochiKit.Color.rgbToHSL(this.rgb);
+this.hsl=hsl;
+}
+return MochiKit.Base.clone(hsl);
+},toString:function(){
+return this.toRGBString();
+},repr:function(){
+var c=this.rgb;
+var col=[c.r,c.g,c.b,c.a];
+return this.__class__.NAME+"("+col.join(", ")+")";
+}};
+MochiKit.Base.update(MochiKit.Color.Color,{fromRGB:function(red,_4de,blue,_4e0){
+var _4e1=MochiKit.Color.Color;
+if(arguments.length==1){
+var rgb=red;
+red=rgb.r;
+_4de=rgb.g;
+blue=rgb.b;
+if(typeof (rgb.a)=="undefined"){
+_4e0=undefined;
+}else{
+_4e0=rgb.a;
+}
+}
+return new _4e1(red,_4de,blue,_4e0);
+},fromHSL:function(hue,_4e4,_4e5,_4e6){
+var m=MochiKit.Color;
+return m.Color.fromRGB(m.hslToRGB.apply(m,arguments));
+},fromHSV:function(hue,_4e9,_4ea,_4eb){
+var m=MochiKit.Color;
+return m.Color.fromRGB(m.hsvToRGB.apply(m,arguments));
+},fromName:function(name){
+var _4ee=MochiKit.Color.Color;
+if(name.charAt(0)=="\""){
+name=name.substr(1,name.length-2);
+}
+var _4ef=_4ee._namedColors[name.toLowerCase()];
+if(typeof (_4ef)=="string"){
+return _4ee.fromHexString(_4ef);
+}else{
+if(name=="transparent"){
+return _4ee.transparentColor();
+}
+}
+return null;
+},fromString:function(_4f0){
+var self=MochiKit.Color.Color;
+var _4f2=_4f0.substr(0,3);
+if(_4f2=="rgb"){
+return self.fromRGBString(_4f0);
+}else{
+if(_4f2=="hsl"){
+return self.fromHSLString(_4f0);
+}else{
+if(_4f0.charAt(0)=="#"){
+return self.fromHexString(_4f0);
+}
+}
+}
+return self.fromName(_4f0);
+},fromHexString:function(_4f3){
+if(_4f3.charAt(0)=="#"){
+_4f3=_4f3.substring(1);
+}
+var _4f4=[];
+var i,hex;
+if(_4f3.length==3){
+for(i=0;i<3;i++){
+hex=_4f3.substr(i,1);
+_4f4.push(parseInt(hex+hex,16)/255);
+}
+}else{
+for(i=0;i<6;i+=2){
+hex=_4f3.substr(i,2);
+_4f4.push(parseInt(hex,16)/255);
+}
+}
+var _4f7=MochiKit.Color.Color;
+return _4f7.fromRGB.apply(_4f7,_4f4);
+},_fromColorString:function(pre,_4f9,_4fa,_4fb){
+if(_4fb.indexOf(pre)===0){
+_4fb=_4fb.substring(_4fb.indexOf("(",3)+1,_4fb.length-1);
+}
+var _4fc=_4fb.split(/\s*,\s*/);
+var _4fd=[];
+for(var i=0;i<_4fc.length;i++){
+var c=_4fc[i];
+var val;
+var _501=c.substring(c.length-3);
+if(c.charAt(c.length-1)=="%"){
+val=0.01*parseFloat(c.substring(0,c.length-1));
+}else{
+if(_501=="deg"){
+val=parseFloat(c)/360;
+}else{
+if(_501=="rad"){
+val=parseFloat(c)/(Math.PI*2);
+}else{
+val=_4fa[i]*parseFloat(c);
+}
+}
+}
+_4fd.push(val);
+}
+return this[_4f9].apply(this,_4fd);
+},fromComputedStyle:function(elem,_503){
+var d=MochiKit.DOM;
+var cls=MochiKit.Color.Color;
+for(elem=d.getElement(elem);elem;elem=elem.parentNode){
+var _506=MochiKit.Style.getStyle.apply(d,arguments);
+if(!_506){
+continue;
+}
+var _507=cls.fromString(_506);
+if(!_507){
+break;
+}
+if(_507.asRGB().a>0){
+return _507;
+}
+}
+return null;
+},fromBackground:function(elem){
+var cls=MochiKit.Color.Color;
+return cls.fromComputedStyle(elem,"backgroundColor","background-color")||cls.whiteColor();
+},fromText:function(elem){
+var cls=MochiKit.Color.Color;
+return cls.fromComputedStyle(elem,"color","color")||cls.blackColor();
+},namedColors:function(){
+return MochiKit.Base.clone(MochiKit.Color.Color._namedColors);
+}});
+MochiKit.Base.update(MochiKit.Color,{clampColorComponent:function(v,_50d){
+v*=_50d;
+if(v<0){
+return 0;
+}else{
+if(v>_50d){
+return _50d;
+}else{
+return v;
+}
+}
+},_hslValue:function(n1,n2,hue){
+if(hue>6){
+hue-=6;
+}else{
+if(hue<0){
+hue+=6;
+}
+}
+var val;
+if(hue<1){
+val=n1+(n2-n1)*hue;
+}else{
+if(hue<3){
+val=n2;
+}else{
+if(hue<4){
+val=n1+(n2-n1)*(4-hue);
+}else{
+val=n1;
+}
+}
+}
+return val;
+},hsvToRGB:function(hue,_513,_514,_515){
+if(arguments.length==1){
+var hsv=hue;
+hue=hsv.h;
+_513=hsv.s;
+_514=hsv.v;
+_515=hsv.a;
+}
+var red;
+var _518;
+var blue;
+if(_513===0){
+red=_514;
+_518=_514;
+blue=_514;
+}else{
+var i=Math.floor(hue*6);
+var f=(hue*6)-i;
+var p=_514*(1-_513);
+var q=_514*(1-(_513*f));
+var t=_514*(1-(_513*(1-f)));
+switch(i){
+case 1:
+red=q;
+_518=_514;
+blue=p;
+break;
+case 2:
+red=p;
+_518=_514;
+blue=t;
+break;
+case 3:
+red=p;
+_518=q;
+blue=_514;
+break;
+case 4:
+red=t;
+_518=p;
+blue=_514;
+break;
+case 5:
+red=_514;
+_518=p;
+blue=q;
+break;
+case 6:
+case 0:
+red=_514;
+_518=t;
+blue=p;
+break;
+}
+}
+return {r:red,g:_518,b:blue,a:_515};
+},hslToRGB:function(hue,_520,_521,_522){
+if(arguments.length==1){
+var hsl=hue;
+hue=hsl.h;
+_520=hsl.s;
+_521=hsl.l;
+_522=hsl.a;
+}
+var red;
+var _525;
+var blue;
+if(_520===0){
+red=_521;
+_525=_521;
+blue=_521;
+}else{
+var m2;
+if(_521<=0.5){
+m2=_521*(1+_520);
+}else{
+m2=_521+_520-(_521*_520);
+}
+var m1=(2*_521)-m2;
+var f=MochiKit.Color._hslValue;
+var h6=hue*6;
+red=f(m1,m2,h6+2);
+_525=f(m1,m2,h6);
+blue=f(m1,m2,h6-2);
+}
+return {r:red,g:_525,b:blue,a:_522};
+},rgbToHSV:function(red,_52c,blue,_52e){
+if(arguments.length==1){
+var rgb=red;
+red=rgb.r;
+_52c=rgb.g;
+blue=rgb.b;
+_52e=rgb.a;
+}
+var max=Math.max(Math.max(red,_52c),blue);
+var min=Math.min(Math.min(red,_52c),blue);
+var hue;
+var _533;
+var _534=max;
+if(min==max){
+hue=0;
+_533=0;
+}else{
+var _535=(max-min);
+_533=_535/max;
+if(red==max){
+hue=(_52c-blue)/_535;
+}else{
+if(_52c==max){
+hue=2+((blue-red)/_535);
+}else{
+hue=4+((red-_52c)/_535);
+}
+}
+hue/=6;
+if(hue<0){
+hue+=1;
+}
+if(hue>1){
+hue-=1;
+}
+}
+return {h:hue,s:_533,v:_534,a:_52e};
+},rgbToHSL:function(red,_537,blue,_539){
+if(arguments.length==1){
+var rgb=red;
+red=rgb.r;
+_537=rgb.g;
+blue=rgb.b;
+_539=rgb.a;
+}
+var max=Math.max(red,Math.max(_537,blue));
+var min=Math.min(red,Math.min(_537,blue));
+var hue;
+var _53e;
+var _53f=(max+min)/2;
+var _540=max-min;
+if(_540===0){
+hue=0;
+_53e=0;
+}else{
+if(_53f<=0.5){
+_53e=_540/(max+min);
+}else{
+_53e=_540/(2-max-min);
+}
+if(red==max){
+hue=(_537-blue)/_540;
+}else{
+if(_537==max){
+hue=2+((blue-red)/_540);
+}else{
+hue=4+((red-_537)/_540);
+}
+}
+hue/=6;
+if(hue<0){
+hue+=1;
+}
+if(hue>1){
+hue-=1;
+}
+}
+return {h:hue,s:_53e,l:_53f,a:_539};
+},toColorPart:function(num){
+num=Math.round(num);
+var _542=num.toString(16);
+if(num<16){
+return "0"+_542;
+}
+return _542;
+},__new__:function(){
+var m=MochiKit.Base;
+this.Color.fromRGBString=m.bind(this.Color._fromColorString,this.Color,"rgb","fromRGB",[1/255,1/255,1/255,1]);
+this.Color.fromHSLString=m.bind(this.Color._fromColorString,this.Color,"hsl","fromHSL",[1/360,0.01,0.01,1]);
+var _544=1/3;
+var _545={black:[0,0,0],blue:[0,0,1],brown:[0.6,0.4,0.2],cyan:[0,1,1],darkGray:[_544,_544,_544],gray:[0.5,0.5,0.5],green:[0,1,0],lightGray:[2*_544,2*_544,2*_544],magenta:[1,0,1],orange:[1,0.5,0],purple:[0.5,0,0.5],red:[1,0,0],transparent:[0,0,0,0],white:[1,1,1],yellow:[1,1,0]};
+var _546=function(name,r,g,b,a){
+var rval=this.fromRGB(r,g,b,a);
+this[name]=function(){
+return rval;
+};
+return rval;
+};
+for(var k in _545){
+var name=k+"Color";
+var _54f=m.concat([_546,this.Color,name],_545[k]);
+this.Color[name]=m.bind.apply(null,_54f);
+}
+var _550=function(){
+for(var i=0;i<arguments.length;i++){
+if(!(arguments[i] instanceof MochiKit.Color.Color)){
+return false;
+}
+}
+return true;
+};
+var _552=function(a,b){
+return a.compareRGB(b);
+};
+m.nameFunctions(this);
+m.registerComparator(this.Color.NAME,_550,_552);
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+}});
+MochiKit.Color.EXPORT=["Color"];
+MochiKit.Color.EXPORT_OK=["clampColorComponent","rgbToHSL","hslToRGB","rgbToHSV","hsvToRGB","toColorPart"];
+MochiKit.Color.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.Color);
+MochiKit.Color.Color._namedColors={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71
585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"};
+MochiKit.Base._deps("Signal",["Base","DOM","Style"]);
+MochiKit.Signal.NAME="MochiKit.Signal";
+MochiKit.Signal.VERSION="1.4.2";
+MochiKit.Signal._observers=[];
+MochiKit.Signal.Event=function(src,e){
+this._event=e||window.event;
+this._src=src;
+};
+MochiKit.Base.update(MochiKit.Signal.Event.prototype,{__repr__:function(){
+var repr=MochiKit.Base.repr;
+var str="{event(): "+repr(this.event())+", src(): "+repr(this.src())+", type(): "+repr(this.type())+", target(): "+repr(this.target());
+if(this.type()&&this.type().indexOf("key")===0||this.type().indexOf("mouse")===0||this.type().indexOf("click")!=-1||this.type()=="contextmenu"){
+str+=", modifier(): "+"{alt: "+repr(this.modifier().alt)+", ctrl: "+repr(this.modifier().ctrl)+", meta: "+repr(this.modifier().meta)+", shift: "+repr(this.modifier().shift)+", any: "+repr(this.modifier().any)+"}";
+}
+if(this.type()&&this.type().indexOf("key")===0){
+str+=", key(): {code: "+repr(this.key().code)+", string: "+repr(this.key().string)+"}";
+}
+if(this.type()&&(this.type().indexOf("mouse")===0||this.type().indexOf("click")!=-1||this.type()=="contextmenu")){
+str+=", mouse(): {page: "+repr(this.mouse().page)+", client: "+repr(this.mouse().client);
+if(this.type()!="mousemove"&&this.type()!="mousewheel"){
+str+=", button: {left: "+repr(this.mouse().button.left)+", middle: "+repr(this.mouse().button.middle)+", right: "+repr(this.mouse().button.right)+"}";
+}
+if(this.type()=="mousewheel"){
+str+=", wheel: "+repr(this.mouse().wheel);
+}
+str+="}";
+}
+if(this.type()=="mouseover"||this.type()=="mouseout"||this.type()=="mouseenter"||this.type()=="mouseleave"){
+str+=", relatedTarget(): "+repr(this.relatedTarget());
+}
+str+="}";
+return str;
+},toString:function(){
+return this.__repr__();
+},src:function(){
+return this._src;
+},event:function(){
+return this._event;
+},type:function(){
+if(this._event.type==="DOMMouseScroll"){
+return "mousewheel";
+}else{
+return this._event.type||undefined;
+}
+},target:function(){
+return this._event.target||this._event.srcElement;
+},_relatedTarget:null,relatedTarget:function(){
+if(this._relatedTarget!==null){
+return this._relatedTarget;
+}
+var elem=null;
+if(this.type()=="mouseover"||this.type()=="mouseenter"){
+elem=(this._event.relatedTarget||this._event.fromElement);
+}else{
+if(this.type()=="mouseout"||this.type()=="mouseleave"){
+elem=(this._event.relatedTarget||this._event.toElement);
+}
+}
+try{
+if(elem!==null&&elem.nodeType!==null){
+this._relatedTarget=elem;
+return elem;
+}
+}
+catch(ignore){
+}
+return undefined;
+},_modifier:null,modifier:function(){
+if(this._modifier!==null){
+return this._modifier;
+}
+var m={};
+m.alt=this._event.altKey;
+m.ctrl=this._event.ctrlKey;
+m.meta=this._event.metaKey||false;
+m.shift=this._event.shiftKey;
+m.any=m.alt||m.ctrl||m.shift||m.meta;
+this._modifier=m;
+return m;
+},_key:null,key:function(){
+if(this._key!==null){
+return this._key;
+}
+var k={};
+if(this.type()&&this.type().indexOf("key")===0){
+if(this.type()=="keydown"||this.type()=="keyup"){
+k.code=this._event.keyCode;
+k.string=(MochiKit.Signal._specialKeys[k.code]||"KEY_UNKNOWN");
+this._key=k;
+return k;
+}else{
+if(this.type()=="keypress"){
+k.code=0;
+k.string="";
+if(typeof (this._event.charCode)!="undefined"&&this._event.charCode!==0&&!MochiKit.Signal._specialMacKeys[this._event.charCode]){
+k.code=this._event.charCode;
+k.string=String.fromCharCode(k.code);
+}else{
+if(this._event.keyCode&&typeof (this._event.charCode)=="undefined"){
+k.code=this._event.keyCode;
+k.string=String.fromCharCode(k.code);
+}
+}
+this._key=k;
+return k;
+}
+}
+}
+return undefined;
+},_mouse:null,mouse:function(){
+if(this._mouse!==null){
+return this._mouse;
+}
+var m={};
+var e=this._event;
+if(this.type()&&(this.type().indexOf("mouse")===0||this.type().indexOf("click")!=-1||this.type()=="contextmenu")){
+m.client=new MochiKit.Style.Coordinates(0,0);
+if(e.clientX||e.clientY){
+m.client.x=(!e.clientX||e.clientX<0)?0:e.clientX;
+m.client.y=(!e.clientY||e.clientY<0)?0:e.clientY;
+}
+m.page=new MochiKit.Style.Coordinates(0,0);
+if(e.pageX||e.pageY){
+m.page.x=(!e.pageX||e.pageX<0)?0:e.pageX;
+m.page.y=(!e.pageY||e.pageY<0)?0:e.pageY;
+}else{
+var de=MochiKit.DOM._document.documentElement;
+var b=MochiKit.DOM._document.body;
+m.page.x=e.clientX+(de.scrollLeft||b.scrollLeft)-(de.clientLeft||0);
+m.page.y=e.clientY+(de.scrollTop||b.scrollTop)-(de.clientTop||0);
+}
+if(this.type()!="mousemove"&&this.type()!="mousewheel"){
+m.button={};
+m.button.left=false;
+m.button.right=false;
+m.button.middle=false;
+if(e.which){
+m.button.left=(e.which==1);
+m.button.middle=(e.which==2);
+m.button.right=(e.which==3);
+}else{
+m.button.left=!!(e.button&1);
+m.button.right=!!(e.button&2);
+m.button.middle=!!(e.button&4);
+}
+}
+if(this.type()=="mousewheel"){
+m.wheel=new MochiKit.Style.Coordinates(0,0);
+if(e.wheelDeltaX||e.wheelDeltaY){
+m.wheel.x=e.wheelDeltaX/-40||0;
+m.wheel.y=e.wheelDeltaY/-40||0;
+}else{
+if(e.wheelDelta){
+m.wheel.y=e.wheelDelta/-40;
+}else{
+m.wheel.y=e.detail||0;
+}
+}
+}
+this._mouse=m;
+return m;
+}
+return undefined;
+},stop:function(){
+this.stopPropagation();
+this.preventDefault();
+},stopPropagation:function(){
+if(this._event.stopPropagation){
+this._event.stopPropagation();
+}else{
+this._event.cancelBubble=true;
+}
+},preventDefault:function(){
+if(this._event.preventDefault){
+this._event.preventDefault();
+}else{
+if(this._confirmUnload===null){
+this._event.returnValue=false;
+}
+}
+},_confirmUnload:null,confirmUnload:function(msg){
+if(this.type()=="beforeunload"){
+this._confirmUnload=msg;
+this._event.returnValue=msg;
+}
+}});
+MochiKit.Signal._specialMacKeys={3:"KEY_ENTER",63289:"KEY_NUM_PAD_CLEAR",63276:"KEY_PAGE_UP",63277:"KEY_PAGE_DOWN",63275:"KEY_END",63273:"KEY_HOME",63234:"KEY_ARROW_LEFT",63232:"KEY_ARROW_UP",63235:"KEY_ARROW_RIGHT",63233:"KEY_ARROW_DOWN",63302:"KEY_INSERT",63272:"KEY_DELETE"};
+(function(){
+var _561=MochiKit.Signal._specialMacKeys;
+for(i=63236;i<=63242;i++){
+_561[i]="KEY_F"+(i-63236+1);
+}
+})();
+MochiKit.Signal._specialKeys={8:"KEY_BACKSPACE",9:"KEY_TAB",12:"KEY_NUM_PAD_CLEAR",13:"KEY_ENTER",16:"KEY_SHIFT",17:"KEY_CTRL",18:"KEY_ALT",19:"KEY_PAUSE",20:"KEY_CAPS_LOCK",27:"KEY_ESCAPE",32:"KEY_SPACEBAR",33:"KEY_PAGE_UP",34:"KEY_PAGE_DOWN",35:"KEY_END",36:"KEY_HOME",37:"KEY_ARROW_LEFT",38:"KEY_ARROW_UP",39:"KEY_ARROW_RIGHT",40:"KEY_ARROW_DOWN",44:"KEY_PRINT_SCREEN",45:"KEY_INSERT",46:"KEY_DELETE",59:"KEY_SEMICOLON",91:"KEY_WINDOWS_LEFT",92:"KEY_WINDOWS_RIGHT",93:"KEY_SELECT",106:"KEY_NUM_PAD_ASTERISK",107:"KEY_NUM_PAD_PLUS_SIGN",109:"KEY_NUM_PAD_HYPHEN-MINUS",110:"KEY_NUM_PAD_FULL_STOP",111:"KEY_NUM_PAD_SOLIDUS",144:"KEY_NUM_LOCK",145:"KEY_SCROLL_LOCK",186:"KEY_SEMICOLON",187:"KEY_EQUALS_SIGN",188:"KEY_COMMA",189:"KEY_HYPHEN-MINUS",190:"KEY_FULL_STOP",191:"KEY_SOLIDUS",192:"KEY_GRAVE_ACCENT",219:"KEY_LEFT_SQUARE_BRACKET",220:"KEY_REVERSE_SOLIDUS",221:"KEY_RIGHT_SQUARE_BRACKET",222:"KEY_APOSTROPHE"};
+(function(){
+var _562=MochiKit.Signal._specialKeys;
+for(var i=48;i<=57;i++){
+_562[i]="KEY_"+(i-48);
+}
+for(i=65;i<=90;i++){
+_562[i]="KEY_"+String.fromCharCode(i);
+}
+for(i=96;i<=105;i++){
+_562[i]="KEY_NUM_PAD_"+(i-96);
+}
+for(i=112;i<=123;i++){
+_562[i]="KEY_F"+(i-112+1);
+}
+})();
+MochiKit.Signal.Ident=function(_564){
+this.source=_564.source;
+this.signal=_564.signal;
+this.listener=_564.listener;
+this.isDOM=_564.isDOM;
+this.objOrFunc=_564.objOrFunc;
+this.funcOrStr=_564.funcOrStr;
+this.connected=_564.connected;
+};
+MochiKit.Signal.Ident.prototype={};
+MochiKit.Base.update(MochiKit.Signal,{__repr__:function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+},toString:function(){
+return this.__repr__();
+},_unloadCache:function(){
+var self=MochiKit.Signal;
+var _566=self._observers;
+for(var i=0;i<_566.length;i++){
+if(_566[i].signal!=="onload"&&_566[i].signal!=="onunload"){
+self._disconnect(_566[i]);
+}
+}
+},_listener:function(src,sig,func,obj,_56c){
+var self=MochiKit.Signal;
+var E=self.Event;
+if(!_56c){
+if(typeof (func.im_self)=="undefined"){
+return MochiKit.Base.bindLate(func,obj);
+}else{
+return func;
+}
+}
+obj=obj||src;
+if(typeof (func)=="string"){
+if(sig==="onload"||sig==="onunload"){
+return function(_56f){
+obj[func].apply(obj,[new E(src,_56f)]);
+var _570=new MochiKit.Signal.Ident({source:src,signal:sig,objOrFunc:obj,funcOrStr:func});
+MochiKit.Signal._disconnect(_570);
+};
+}else{
+return function(_571){
+obj[func].apply(obj,[new E(src,_571)]);
+};
+}
+}else{
+if(sig==="onload"||sig==="onunload"){
+return function(_572){
+func.apply(obj,[new E(src,_572)]);
+var _573=new MochiKit.Signal.Ident({source:src,signal:sig,objOrFunc:func});
+MochiKit.Signal._disconnect(_573);
+};
+}else{
+return function(_574){
+func.apply(obj,[new E(src,_574)]);
+};
+}
+}
+},_browserAlreadyHasMouseEnterAndLeave:function(){
+return /MSIE/.test(navigator.userAgent);
+},_browserLacksMouseWheelEvent:function(){
+return /Gecko\//.test(navigator.userAgent);
+},_mouseEnterListener:function(src,sig,func,obj){
+var E=MochiKit.Signal.Event;
+return function(_57a){
+var e=new E(src,_57a);
+try{
+e.relatedTarget().nodeName;
+}
+catch(err){
+return;
+}
+e.stop();
+if(MochiKit.DOM.isChildNode(e.relatedTarget(),src)){
+return;
+}
+e.type=function(){
+return sig;
+};
+if(typeof (func)=="string"){
+return obj[func].apply(obj,[e]);
+}else{
+return func.apply(obj,[e]);
+}
+};
+},_getDestPair:function(_57c,_57d){
+var obj=null;
+var func=null;
+if(typeof (_57d)!="undefined"){
+obj=_57c;
+func=_57d;
+if(typeof (_57d)=="string"){
+if(typeof (_57c[_57d])!="function"){
+throw new Error("'funcOrStr' must be a function on 'objOrFunc'");
+}
+}else{
+if(typeof (_57d)!="function"){
+throw new Error("'funcOrStr' must be a function or string");
+}
+}
+}else{
+if(typeof (_57c)!="function"){
+throw new Error("'objOrFunc' must be a function if 'funcOrStr' is not given");
+}else{
+func=_57c;
+}
+}
+return [obj,func];
+},connect:function(src,sig,_582,_583){
+src=MochiKit.DOM.getElement(src);
+var self=MochiKit.Signal;
+if(typeof (sig)!="string"){
+throw new Error("'sig' must be a string");
+}
+var _585=self._getDestPair(_582,_583);
+var obj=_585[0];
+var func=_585[1];
+if(typeof (obj)=="undefined"||obj===null){
+obj=src;
+}
+var _588=!!(src.addEventListener||src.attachEvent);
+if(_588&&(sig==="onmouseenter"||sig==="onmouseleave")&&!self._browserAlreadyHasMouseEnterAndLeave()){
+var _589=self._mouseEnterListener(src,sig.substr(2),func,obj);
+if(sig==="onmouseenter"){
+sig="onmouseover";
+}else{
+sig="onmouseout";
+}
+}else{
+if(_588&&sig=="onmousewheel"&&self._browserLacksMouseWheelEvent()){
+var _589=self._listener(src,sig,func,obj,_588);
+sig="onDOMMouseScroll";
+}else{
+var _589=self._listener(src,sig,func,obj,_588);
+}
+}
+if(src.addEventListener){
+src.addEventListener(sig.substr(2),_589,false);
+}else{
+if(src.attachEvent){
+src.attachEvent(sig,_589);
+}
+}
+var _58a=new MochiKit.Signal.Ident({source:src,signal:sig,listener:_589,isDOM:_588,objOrFunc:_582,funcOrStr:_583,connected:true});
+self._observers.push(_58a);
+if(!_588&&typeof (src.__connect__)=="function"){
+var args=MochiKit.Base.extend([_58a],arguments,1);
+src.__connect__.apply(src,args);
+}
+return _58a;
+},_disconnect:function(_58c){
+if(!_58c.connected){
+return;
+}
+_58c.connected=false;
+var src=_58c.source;
+var sig=_58c.signal;
+var _58f=_58c.listener;
+if(!_58c.isDOM){
+if(typeof (src.__disconnect__)=="function"){
+src.__disconnect__(_58c,sig,_58c.objOrFunc,_58c.funcOrStr);
+}
+return;
+}
+if(src.removeEventListener){
+src.removeEventListener(sig.substr(2),_58f,false);
+}else{
+if(src.detachEvent){
+src.detachEvent(sig,_58f);
+}else{
+throw new Error("'src' must be a DOM element");
+}
+}
+},disconnect:function(_590){
+var self=MochiKit.Signal;
+var _592=self._observers;
+var m=MochiKit.Base;
+if(arguments.length>1){
+var src=MochiKit.DOM.getElement(arguments[0]);
+var sig=arguments[1];
+var obj=arguments[2];
+var func=arguments[3];
+for(var i=_592.length-1;i>=0;i--){
+var o=_592[i];
+if(o.source===src&&o.signal===sig&&o.objOrFunc===obj&&o.funcOrStr===func){
+self._disconnect(o);
+if(!self._lock){
+_592.splice(i,1);
+}else{
+self._dirty=true;
+}
+return true;
+}
+}
+}else{
+var idx=m.findIdentical(_592,_590);
+if(idx>=0){
+self._disconnect(_590);
+if(!self._lock){
+_592.splice(idx,1);
+}else{
+self._dirty=true;
+}
+return true;
+}
+}
+return false;
+},disconnectAllTo:function(_59b,_59c){
+var self=MochiKit.Signal;
+var _59e=self._observers;
+var _59f=self._disconnect;
+var _5a0=self._lock;
+var _5a1=self._dirty;
+if(typeof (_59c)==="undefined"){
+_59c=null;
+}
+for(var i=_59e.length-1;i>=0;i--){
+var _5a3=_59e[i];
+if(_5a3.objOrFunc===_59b&&(_59c===null||_5a3.funcOrStr===_59c)){
+_59f(_5a3);
+if(_5a0){
+_5a1=true;
+}else{
+_59e.splice(i,1);
+}
+}
+}
+self._dirty=_5a1;
+},disconnectAll:function(src,sig){
+src=MochiKit.DOM.getElement(src);
+var m=MochiKit.Base;
+var _5a7=m.flattenArguments(m.extend(null,arguments,1));
+var self=MochiKit.Signal;
+var _5a9=self._disconnect;
+var _5aa=self._observers;
+var i,_5ac;
+var _5ad=self._lock;
+var _5ae=self._dirty;
+if(_5a7.length===0){
+for(i=_5aa.length-1;i>=0;i--){
+_5ac=_5aa[i];
+if(_5ac.source===src){
+_5a9(_5ac);
+if(!_5ad){
+_5aa.splice(i,1);
+}else{
+_5ae=true;
+}
+}
+}
+}else{
+var sigs={};
+for(i=0;i<_5a7.length;i++){
+sigs[_5a7[i]]=true;
+}
+for(i=_5aa.length-1;i>=0;i--){
+_5ac=_5aa[i];
+if(_5ac.source===src&&_5ac.signal in sigs){
+_5a9(_5ac);
+if(!_5ad){
+_5aa.splice(i,1);
+}else{
+_5ae=true;
+}
+}
+}
+}
+self._dirty=_5ae;
+},signal:function(src,sig){
+var self=MochiKit.Signal;
+var _5b3=self._observers;
+src=MochiKit.DOM.getElement(src);
+var args=MochiKit.Base.extend(null,arguments,2);
+var _5b5=[];
+self._lock=true;
+for(var i=0;i<_5b3.length;i++){
+var _5b7=_5b3[i];
+if(_5b7.source===src&&_5b7.signal===sig&&_5b7.connected){
+try{
+_5b7.listener.apply(src,args);
+}
+catch(e){
+_5b5.push(e);
+}
+}
+}
+self._lock=false;
+if(self._dirty){
+self._dirty=false;
+for(var i=_5b3.length-1;i>=0;i--){
+if(!_5b3[i].connected){
+_5b3.splice(i,1);
+}
+}
+}
+if(_5b5.length==1){
+throw _5b5[0];
+}else{
+if(_5b5.length>1){
+var e=new Error("Multiple errors thrown in handling 'sig', see errors property");
+e.errors=_5b5;
+throw e;
+}
+}
+}});
+MochiKit.Signal.EXPORT_OK=[];
+MochiKit.Signal.EXPORT=["connect","disconnect","signal","disconnectAll","disconnectAllTo"];
+MochiKit.Signal.__new__=function(win){
+var m=MochiKit.Base;
+this._document=document;
+this._window=win;
+this._lock=false;
+this._dirty=false;
+try{
+this.connect(window,"onunload",this._unloadCache);
+}
+catch(e){
+}
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+};
+MochiKit.Signal.__new__(this);
+if(MochiKit.__export__){
+connect=MochiKit.Signal.connect;
+disconnect=MochiKit.Signal.disconnect;
+disconnectAll=MochiKit.Signal.disconnectAll;
+signal=MochiKit.Signal.signal;
+}
+MochiKit.Base._exportSymbols(this,MochiKit.Signal);
+MochiKit.Base._deps("Position",["Base","DOM","Style"]);
+MochiKit.Position.NAME="MochiKit.Position";
+MochiKit.Position.VERSION="1.4.2";
+MochiKit.Position.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Position.toString=function(){
+return this.__repr__();
+};
+MochiKit.Position.EXPORT_OK=[];
+MochiKit.Position.EXPORT=[];
+MochiKit.Base.update(MochiKit.Position,{includeScrollOffsets:false,prepare:function(){
+var _5bb=window.pageXOffset||document.documentElement.scrollLeft||document.body.scrollLeft||0;
+var _5bc=window.pageYOffset||document.documentElement.scrollTop||document.body.scrollTop||0;
+this.windowOffset=new MochiKit.Style.Coordinates(_5bb,_5bc);
+},cumulativeOffset:function(_5bd){
+var _5be=0;
+var _5bf=0;
+do{
+_5be+=_5bd.offsetTop||0;
+_5bf+=_5bd.offsetLeft||0;
+_5bd=_5bd.offsetParent;
+}while(_5bd);
+return new MochiKit.Style.Coordinates(_5bf,_5be);
+},realOffset:function(_5c0){
+var _5c1=0;
+var _5c2=0;
+do{
+_5c1+=_5c0.scrollTop||0;
+_5c2+=_5c0.scrollLeft||0;
+_5c0=_5c0.parentNode;
+}while(_5c0);
+return new MochiKit.Style.Coordinates(_5c2,_5c1);
+},within:function(_5c3,x,y){
+if(this.includeScrollOffsets){
+return this.withinIncludingScrolloffsets(_5c3,x,y);
+}
+this.xcomp=x;
+this.ycomp=y;
+this.offset=this.cumulativeOffset(_5c3);
+if(_5c3.style.position=="fixed"){
+this.offset.x+=this.windowOffset.x;
+this.offset.y+=this.windowOffset.y;
+}
+return (y>=this.offset.y&&y<this.offset.y+_5c3.offsetHeight&&x>=this.offset.x&&x<this.offset.x+_5c3.offsetWidth);
+},withinIncludingScrolloffsets:function(_5c6,x,y){
+var _5c9=this.realOffset(_5c6);
+this.xcomp=x+_5c9.x-this.windowOffset.x;
+this.ycomp=y+_5c9.y-this.windowOffset.y;
+this.offset=this.cumulativeOffset(_5c6);
+return (this.ycomp>=this.offset.y&&this.ycomp<this.offset.y+_5c6.offsetHeight&&this.xcomp>=this.offset.x&&this.xcomp<this.offset.x+_5c6.offsetWidth);
+},overlap:function(mode,_5cb){
+if(!mode){
+return 0;
+}
+if(mode=="vertical"){
+return ((this.offset.y+_5cb.offsetHeight)-this.ycomp)/_5cb.offsetHeight;
+}
+if(mode=="horizontal"){
+return ((this.offset.x+_5cb.offsetWidth)-this.xcomp)/_5cb.offsetWidth;
+}
+},absolutize:function(_5cc){
+_5cc=MochiKit.DOM.getElement(_5cc);
+if(_5cc.style.position=="absolute"){
+return;
+}
+MochiKit.Position.prepare();
+var _5cd=MochiKit.Position.positionedOffset(_5cc);
+var _5ce=_5cc.clientWidth;
+var _5cf=_5cc.clientHeight;
+var _5d0={"position":_5cc.style.position,"left":_5cd.x-parseFloat(_5cc.style.left||0),"top":_5cd.y-parseFloat(_5cc.style.top||0),"width":_5cc.style.width,"height":_5cc.style.height};
+_5cc.style.position="absolute";
+_5cc.style.top=_5cd.y+"px";
+_5cc.style.left=_5cd.x+"px";
+_5cc.style.width=_5ce+"px";
+_5cc.style.height=_5cf+"px";
+return _5d0;
+},positionedOffset:function(_5d1){
+var _5d2=0,_5d3=0;
+do{
+_5d2+=_5d1.offsetTop||0;
+_5d3+=_5d1.offsetLeft||0;
+_5d1=_5d1.offsetParent;
+if(_5d1){
+p=MochiKit.Style.getStyle(_5d1,"position");
+if(p=="relative"||p=="absolute"){
+break;
+}
+}
+}while(_5d1);
+return new MochiKit.Style.Coordinates(_5d3,_5d2);
+},relativize:function(_5d4,_5d5){
+_5d4=MochiKit.DOM.getElement(_5d4);
+if(_5d4.style.position=="relative"){
+return;
+}
+MochiKit.Position.prepare();
+var top=parseFloat(_5d4.style.top||0)-(_5d5["top"]||0);
+var left=parseFloat(_5d4.style.left||0)-(_5d5["left"]||0);
+_5d4.style.position=_5d5["position"];
+_5d4.style.top=top+"px";
+_5d4.style.left=left+"px";
+_5d4.style.width=_5d5["width"];
+_5d4.style.height=_5d5["height"];
+},clone:function(_5d8,_5d9){
+_5d8=MochiKit.DOM.getElement(_5d8);
+_5d9=MochiKit.DOM.getElement(_5d9);
+_5d9.style.position="absolute";
+var _5da=this.cumulativeOffset(_5d8);
+_5d9.style.top=_5da.y+"px";
+_5d9.style.left=_5da.x+"px";
+_5d9.style.width=_5d8.offsetWidth+"px";
+_5d9.style.height=_5d8.offsetHeight+"px";
+},page:function(_5db){
+var _5dc=0;
+var _5dd=0;
+var _5de=_5db;
+do{
+_5dc+=_5de.offsetTop||0;
+_5dd+=_5de.offsetLeft||0;
+if(_5de.offsetParent==document.body&&MochiKit.Style.getStyle(_5de,"position")=="absolute"){
+break;
+}
+}while(_5de=_5de.offsetParent);
+_5de=_5db;
+do{
+_5dc-=_5de.scrollTop||0;
+_5dd-=_5de.scrollLeft||0;
+}while(_5de=_5de.parentNode);
+return new MochiKit.Style.Coordinates(_5dd,_5dc);
+}});
+MochiKit.Position.__new__=function(win){
+var m=MochiKit.Base;
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+m.nameFunctions(this);
+};
+MochiKit.Position.__new__(this);
+MochiKit.Base._exportSymbols(this,MochiKit.Position);
+MochiKit.Base._deps("Visual",["Base","DOM","Style","Color","Position"]);
+MochiKit.Visual.NAME="MochiKit.Visual";
+MochiKit.Visual.VERSION="1.4.2";
+MochiKit.Visual.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Visual.toString=function(){
+return this.__repr__();
+};
+MochiKit.Visual._RoundCorners=function(e,_5e2){
+e=MochiKit.DOM.getElement(e);
+this._setOptions(_5e2);
+if(this.options.__unstable__wrapElement){
+e=this._doWrap(e);
+}
+var _5e3=this.options.color;
+var C=MochiKit.Color.Color;
+if(this.options.color==="fromElement"){
+_5e3=C.fromBackground(e);
+}else{
+if(!(_5e3 instanceof C)){
+_5e3=C.fromString(_5e3);
+}
+}
+this.isTransparent=(_5e3.asRGB().a<=0);
+var _5e5=this.options.bgColor;
+if(this.options.bgColor==="fromParent"){
+_5e5=C.fromBackground(e.offsetParent);
+}else{
+if(!(_5e5 instanceof C)){
+_5e5=C.fromString(_5e5);
+}
+}
+this._roundCornersImpl(e,_5e3,_5e5);
+};
+MochiKit.Visual._RoundCorners.prototype={_doWrap:function(e){
+var _5e7=e.parentNode;
+var doc=MochiKit.DOM.currentDocument();
+if(typeof (doc.defaultView)==="undefined"||doc.defaultView===null){
+return e;
+}
+var _5e9=doc.defaultView.getComputedStyle(e,null);
+if(typeof (_5e9)==="undefined"||_5e9===null){
+return e;
+}
+var _5ea=MochiKit.DOM.DIV({"style":{display:"block",marginTop:_5e9.getPropertyValue("padding-top"),marginRight:_5e9.getPropertyValue("padding-right"),marginBottom:_5e9.getPropertyValue("padding-bottom"),marginLeft:_5e9.getPropertyValue("padding-left"),padding:"0px"}});
+_5ea.innerHTML=e.innerHTML;
+e.innerHTML="";
+e.appendChild(_5ea);
+return e;
+},_roundCornersImpl:function(e,_5ec,_5ed){
+if(this.options.border){
+this._renderBorder(e,_5ed);
+}
+if(this._isTopRounded()){
+this._roundTopCorners(e,_5ec,_5ed);
+}
+if(this._isBottomRounded()){
+this._roundBottomCorners(e,_5ec,_5ed);
+}
+},_renderBorder:function(el,_5ef){
+var _5f0="1px solid "+this._borderColor(_5ef);
+var _5f1="border-left: "+_5f0;
+var _5f2="border-right: "+_5f0;
+var _5f3="style='"+_5f1+";"+_5f2+"'";
+el.innerHTML="<div "+_5f3+">"+el.innerHTML+"</div>";
+},_roundTopCorners:function(el,_5f5,_5f6){
+var _5f7=this._createCorner(_5f6);
+for(var i=0;i<this.options.numSlices;i++){
+_5f7.appendChild(this._createCornerSlice(_5f5,_5f6,i,"top"));
+}
+el.style.paddingTop=0;
+el.insertBefore(_5f7,el.firstChild);
+},_roundBottomCorners:function(el,_5fa,_5fb){
+var _5fc=this._createCorner(_5fb);
+for(var i=(this.options.numSlices-1);i>=0;i--){
+_5fc.appendChild(this._createCornerSlice(_5fa,_5fb,i,"bottom"));
+}
+el.style.paddingBottom=0;
+el.appendChild(_5fc);
+},_createCorner:function(_5fe){
+var dom=MochiKit.DOM;
+return dom.DIV({style:{backgroundColor:_5fe.toString()}});
+},_createCornerSlice:function(_600,_601,n,_603){
+var _604=MochiKit.DOM.SPAN();
+var _605=_604.style;
+_605.backgroundColor=_600.toString();
+_605.display="block";
+_605.height="1px";
+_605.overflow="hidden";
+_605.fontSize="1px";
+var _606=this._borderColor(_600,_601);
+if(this.options.border&&n===0){
+_605.borderTopStyle="solid";
+_605.borderTopWidth="1px";
+_605.borderLeftWidth="0px";
+_605.borderRightWidth="0px";
+_605.borderBottomWidth="0px";
+_605.height="0px";
+_605.borderColor=_606.toString();
+}else{
+if(_606){
+_605.borderColor=_606.toString();
+_605.borderStyle="solid";
+_605.borderWidth="0px 1px";
+}
+}
+if(!this.options.compact&&(n==(this.options.numSlices-1))){
+_605.height="2px";
+}
+this._setMargin(_604,n,_603);
+this._setBorder(_604,n,_603);
+return _604;
+},_setOptions:function(_607){
+this.options={corners:"all",color:"fromElement",bgColor:"fromParent",blend:true,border:false,compact:false,__unstable__wrapElement:false};
+MochiKit.Base.update(this.options,_607);
+this.options.numSlices=(this.options.compact?2:4);
+},_whichSideTop:function(){
+var _608=this.options.corners;
+if(this._hasString(_608,"all","top")){
+return "";
+}
+var _609=(_608.indexOf("tl")!=-1);
+var _60a=(_608.indexOf("tr")!=-1);
+if(_609&&_60a){
+return "";
+}
+if(_609){
+return "left";
+}
+if(_60a){
+return "right";
+}
+return "";
+},_whichSideBottom:function(){
+var _60b=this.options.corners;
+if(this._hasString(_60b,"all","bottom")){
+return "";
+}
+var _60c=(_60b.indexOf("bl")!=-1);
+var _60d=(_60b.indexOf("br")!=-1);
+if(_60c&&_60d){
+return "";
+}
+if(_60c){
+return "left";
+}
+if(_60d){
+return "right";
+}
+return "";
+},_borderColor:function(_60e,_60f){
+if(_60e=="transparent"){
+return _60f;
+}else{
+if(this.options.border){
+return this.options.border;
+}else{
+if(this.options.blend){
+return _60f.blendedColor(_60e);
+}
+}
+}
+return "";
+},_setMargin:function(el,n,_612){
+var _613=this._marginSize(n)+"px";
+var _614=(_612=="top"?this._whichSideTop():this._whichSideBottom());
+var _615=el.style;
+if(_614=="left"){
+_615.marginLeft=_613;
+_615.marginRight="0px";
+}else{
+if(_614=="right"){
+_615.marginRight=_613;
+_615.marginLeft="0px";
+}else{
+_615.marginLeft=_613;
+_615.marginRight=_613;
+}
+}
+},_setBorder:function(el,n,_618){
+var _619=this._borderSize(n)+"px";
+var _61a=(_618=="top"?this._whichSideTop():this._whichSideBottom());
+var _61b=el.style;
+if(_61a=="left"){
+_61b.borderLeftWidth=_619;
+_61b.borderRightWidth="0px";
+}else{
+if(_61a=="right"){
+_61b.borderRightWidth=_619;
+_61b.borderLeftWidth="0px";
+}else{
+_61b.borderLeftWidth=_619;
+_61b.borderRightWidth=_619;
+}
+}
+},_marginSize:function(n){
+if(this.isTransparent){
+return 0;
+}
+var o=this.options;
+if(o.compact&&o.blend){
+var _61e=[1,0];
+return _61e[n];
+}else{
+if(o.compact){
+var _61f=[2,1];
+return _61f[n];
+}else{
+if(o.blend){
+var _620=[3,2,1,0];
+return _620[n];
+}else{
+var _621=[5,3,2,1];
+return _621[n];
+}
+}
+}
+},_borderSize:function(n){
+var o=this.options;
+var _624;
+if(o.compact&&(o.blend||this.isTransparent)){
+return 1;
+}else{
+if(o.compact){
+_624=[1,0];
+}else{
+if(o.blend){
+_624=[2,1,1,1];
+}else{
+if(o.border){
+_624=[0,2,0,0];
+}else{
+if(this.isTransparent){
+_624=[5,3,2,1];
+}else{
+return 0;
+}
+}
+}
+}
+}
+return _624[n];
+},_hasString:function(str){
+for(var i=1;i<arguments.length;i++){
+if(str.indexOf(arguments[i])!=-1){
+return true;
+}
+}
+return false;
+},_isTopRounded:function(){
+return this._hasString(this.options.corners,"all","top","tl","tr");
+},_isBottomRounded:function(){
+return this._hasString(this.options.corners,"all","bottom","bl","br");
+},_hasSingleTextChild:function(el){
+return (el.childNodes.length==1&&el.childNodes[0].nodeType==3);
+}};
+MochiKit.Visual.roundElement=function(e,_629){
+new MochiKit.Visual._RoundCorners(e,_629);
+};
+MochiKit.Visual.roundClass=function(_62a,_62b,_62c){
+var _62d=MochiKit.DOM.getElementsByTagAndClassName(_62a,_62b);
+for(var i=0;i<_62d.length;i++){
+MochiKit.Visual.roundElement(_62d[i],_62c);
+}
+};
+MochiKit.Visual.tagifyText=function(_62f,_630){
+_630=_630||"position:relative";
+if(/MSIE/.test(navigator.userAgent)){
+_630+=";zoom:1";
+}
+_62f=MochiKit.DOM.getElement(_62f);
+var ma=MochiKit.Base.map;
+ma(function(_632){
+if(_632.nodeType==3){
+ma(function(_633){
+_62f.insertBefore(MochiKit.DOM.SPAN({style:_630},_633==" "?String.fromCharCode(160):_633),_632);
+},_632.nodeValue.split(""));
+MochiKit.DOM.removeElement(_632);
+}
+},_62f.childNodes);
+};
+MochiKit.Visual.forceRerendering=function(_634){
+try{
+_634=MochiKit.DOM.getElement(_634);
+var n=document.createTextNode(" ");
+_634.appendChild(n);
+_634.removeChild(n);
+}
+catch(e){
+}
+};
+MochiKit.Visual.multiple=function(_636,_637,_638){
+_638=MochiKit.Base.update({speed:0.1,delay:0},_638);
+var _639=_638.delay;
+var _63a=0;
+MochiKit.Base.map(function(_63b){
+_638.delay=_63a*_638.speed+_639;
+new _637(_63b,_638);
+_63a+=1;
+},_636);
+};
+MochiKit.Visual.PAIRS={"slide":["slideDown","slideUp"],"blind":["blindDown","blindUp"],"appear":["appear","fade"],"size":["grow","shrink"]};
+MochiKit.Visual.toggle=function(_63c,_63d,_63e){
+_63c=MochiKit.DOM.getElement(_63c);
+_63d=(_63d||"appear").toLowerCase();
+_63e=MochiKit.Base.update({queue:{position:"end",scope:(_63c.id||"global"),limit:1}},_63e);
+var v=MochiKit.Visual;
+v[MochiKit.Style.getStyle(_63c,"display")!="none"?v.PAIRS[_63d][1]:v.PAIRS[_63d][0]](_63c,_63e);
+};
+MochiKit.Visual.Transitions={};
+MochiKit.Visual.Transitions.linear=function(pos){
+return pos;
+};
+MochiKit.Visual.Transitions.sinoidal=function(pos){
+return 0.5-Math.cos(pos*Math.PI)/2;
+};
+MochiKit.Visual.Transitions.reverse=function(pos){
+return 1-pos;
+};
+MochiKit.Visual.Transitions.flicker=function(pos){
+return 0.25-Math.cos(pos*Math.PI)/4+Math.random()/2;
+};
+MochiKit.Visual.Transitions.wobble=function(pos){
+return 0.5-Math.cos(9*pos*Math.PI)/2;
+};
+MochiKit.Visual.Transitions.pulse=function(pos,_646){
+if(_646){
+pos*=2*_646;
+}else{
+pos*=10;
+}
+var _647=pos-Math.floor(pos);
+return (Math.floor(pos)%2==0)?_647:1-_647;
+};
+MochiKit.Visual.Transitions.parabolic=function(pos){
+return pos*pos;
+};
+MochiKit.Visual.Transitions.none=function(pos){
+return 0;
+};
+MochiKit.Visual.Transitions.full=function(pos){
+return 1;
+};
+MochiKit.Visual.ScopedQueue=function(){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls();
+}
+this.__init__();
+};
+MochiKit.Base.update(MochiKit.Visual.ScopedQueue.prototype,{__init__:function(){
+this.effects=[];
+this.interval=null;
+},add:function(_64c){
+var _64d=new Date().getTime();
+var _64e=(typeof (_64c.options.queue)=="string")?_64c.options.queue:_64c.options.queue.position;
+var ma=MochiKit.Base.map;
+switch(_64e){
+case "front":
+ma(function(e){
+if(e.state=="idle"){
+e.startOn+=_64c.finishOn;
+e.finishOn+=_64c.finishOn;
+}
+},this.effects);
+break;
+case "end":
+var _651;
+ma(function(e){
+var i=e.finishOn;
+if(i>=(_651||i)){
+_651=i;
+}
+},this.effects);
+_64d=_651||_64d;
+break;
+case "break":
+ma(function(e){
+e.finalize();
+},this.effects);
+break;
+}
+_64c.startOn+=_64d;
+_64c.finishOn+=_64d;
+if(!_64c.options.queue.limit||this.effects.length<_64c.options.queue.limit){
+this.effects.push(_64c);
+}
+if(!this.interval){
+this.interval=this.startLoop(MochiKit.Base.bind(this.loop,this),40);
+}
+},startLoop:function(func,_656){
+return setInterval(func,_656);
+},remove:function(_657){
+this.effects=MochiKit.Base.filter(function(e){
+return e!=_657;
+},this.effects);
+if(!this.effects.length){
+this.stopLoop(this.interval);
+this.interval=null;
+}
+},stopLoop:function(_659){
+clearInterval(_659);
+},loop:function(){
+var _65a=new Date().getTime();
+MochiKit.Base.map(function(_65b){
+_65b.loop(_65a);
+},this.effects);
+}});
+MochiKit.Visual.Queues={instances:{},get:function(_65c){
+if(typeof (_65c)!="string"){
+return _65c;
+}
+if(!this.instances[_65c]){
+this.instances[_65c]=new MochiKit.Visual.ScopedQueue();
+}
+return this.instances[_65c];
+}};
+MochiKit.Visual.Queue=MochiKit.Visual.Queues.get("global");
+MochiKit.Visual.DefaultOptions={transition:MochiKit.Visual.Transitions.sinoidal,duration:1,fps:25,sync:false,from:0,to:1,delay:0,queue:"parallel"};
+MochiKit.Visual.Base=function(){
+};
+MochiKit.Visual.Base.prototype={__class__:MochiKit.Visual.Base,start:function(_65d){
+var v=MochiKit.Visual;
+this.options=MochiKit.Base.setdefault(_65d,v.DefaultOptions);
+this.currentFrame=0;
+this.state="idle";
+this.startOn=this.options.delay*1000;
+this.finishOn=this.startOn+(this.options.duration*1000);
+this.event("beforeStart");
+if(!this.options.sync){
+v.Queues.get(typeof (this.options.queue)=="string"?"global":this.options.queue.scope).add(this);
+}
+},loop:function(_65f){
+if(_65f>=this.startOn){
+if(_65f>=this.finishOn){
+return this.finalize();
+}
+var pos=(_65f-this.startOn)/(this.finishOn-this.startOn);
+var _661=Math.round(pos*this.options.fps*this.options.duration);
+if(_661>this.currentFrame){
+this.render(pos);
+this.currentFrame=_661;
+}
+}
+},render:function(pos){
+if(this.state=="idle"){
+this.state="running";
+this.event("beforeSetup");
+this.setup();
+this.event("afterSetup");
+}
+if(this.state=="running"){
+if(this.options.transition){
+pos=this.options.transition(pos);
+}
+pos*=(this.options.to-this.options.from);
+pos+=this.options.from;
+this.event("beforeUpdate");
+this.update(pos);
+this.event("afterUpdate");
+}
+},cancel:function(){
+if(!this.options.sync){
+MochiKit.Visual.Queues.get(typeof (this.options.queue)=="string"?"global":this.options.queue.scope).remove(this);
+}
+this.state="finished";
+},finalize:function(){
+this.render(1);
+this.cancel();
+this.event("beforeFinish");
+this.finish();
+this.event("afterFinish");
+},setup:function(){
+},finish:function(){
+},update:function(_663){
+},event:function(_664){
+if(this.options[_664+"Internal"]){
+this.options[_664+"Internal"](this);
+}
+if(this.options[_664]){
+this.options[_664](this);
+}
+},repr:function(){
+return "["+this.__class__.NAME+", options:"+MochiKit.Base.repr(this.options)+"]";
+}};
+MochiKit.Visual.Parallel=function(_665,_666){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_665,_666);
+}
+this.__init__(_665,_666);
+};
+MochiKit.Visual.Parallel.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.Parallel.prototype,{__class__:MochiKit.Visual.Parallel,__init__:function(_668,_669){
+this.effects=_668||[];
+this.start(_669);
+},update:function(_66a){
+MochiKit.Base.map(function(_66b){
+_66b.render(_66a);
+},this.effects);
+},finish:function(){
+MochiKit.Base.map(function(_66c){
+_66c.finalize();
+},this.effects);
+}});
+MochiKit.Visual.Sequence=function(_66d,_66e){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_66d,_66e);
+}
+this.__init__(_66d,_66e);
+};
+MochiKit.Visual.Sequence.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.Sequence.prototype,{__class__:MochiKit.Visual.Sequence,__init__:function(_670,_671){
+var defs={transition:MochiKit.Visual.Transitions.linear,duration:0};
+this.effects=_670||[];
+MochiKit.Base.map(function(_673){
+defs.duration+=_673.options.duration;
+},this.effects);
+MochiKit.Base.setdefault(_671,defs);
+this.start(_671);
+},update:function(_674){
+var time=_674*this.options.duration;
+for(var i=0;i<this.effects.length;i++){
+var _677=this.effects[i];
+if(time<=_677.options.duration){
+_677.render(time/_677.options.duration);
+break;
+}else{
+time-=_677.options.duration;
+}
+}
+},finish:function(){
+MochiKit.Base.map(function(_678){
+_678.finalize();
+},this.effects);
+}});
+MochiKit.Visual.Opacity=function(_679,_67a){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_679,_67a);
+}
+this.__init__(_679,_67a);
+};
+MochiKit.Visual.Opacity.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.Opacity.prototype,{__class__:MochiKit.Visual.Opacity,__init__:function(_67c,_67d){
+var b=MochiKit.Base;
+var s=MochiKit.Style;
+this.element=MochiKit.DOM.getElement(_67c);
+if(this.element.currentStyle&&(!this.element.currentStyle.hasLayout)){
+s.setStyle(this.element,{zoom:1});
+}
+_67d=b.update({from:s.getStyle(this.element,"opacity")||0,to:1},_67d);
+this.start(_67d);
+},update:function(_680){
+MochiKit.Style.setStyle(this.element,{"opacity":_680});
+}});
+MochiKit.Visual.Move=function(_681,_682){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_681,_682);
+}
+this.__init__(_681,_682);
+};
+MochiKit.Visual.Move.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.Move.prototype,{__class__:MochiKit.Visual.Move,__init__:function(_684,_685){
+this.element=MochiKit.DOM.getElement(_684);
+_685=MochiKit.Base.update({x:0,y:0,mode:"relative"},_685);
+this.start(_685);
+},setup:function(){
+MochiKit.Style.makePositioned(this.element);
+var s=this.element.style;
+var _687=s.visibility;
+var _688=s.display;
+if(_688=="none"){
+s.visibility="hidden";
+s.display="";
+}
+this.originalLeft=parseFloat(MochiKit.Style.getStyle(this.element,"left")||"0");
+this.originalTop=parseFloat(MochiKit.Style.getStyle(this.element,"top")||"0");
+if(this.options.mode=="absolute"){
+this.options.x-=this.originalLeft;
+this.options.y-=this.originalTop;
+}
+if(_688=="none"){
+s.visibility=_687;
+s.display=_688;
+}
+},update:function(_689){
+MochiKit.Style.setStyle(this.element,{left:Math.round(this.options.x*_689+this.originalLeft)+"px",top:Math.round(this.options.y*_689+this.originalTop)+"px"});
+}});
+MochiKit.Visual.Scale=function(_68a,_68b,_68c){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_68a,_68b,_68c);
+}
+this.__init__(_68a,_68b,_68c);
+};
+MochiKit.Visual.Scale.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.Scale.prototype,{__class__:MochiKit.Visual.Scale,__init__:function(_68e,_68f,_690){
+this.element=MochiKit.DOM.getElement(_68e);
+_690=MochiKit.Base.update({scaleX:true,scaleY:true,scaleContent:true,scaleFromCenter:false,scaleMode:"box",scaleFrom:100,scaleTo:_68f},_690);
+this.start(_690);
+},setup:function(){
+this.restoreAfterFinish=this.options.restoreAfterFinish||false;
+this.elementPositioning=MochiKit.Style.getStyle(this.element,"position");
+var ma=MochiKit.Base.map;
+var b=MochiKit.Base.bind;
+this.originalStyle={};
+ma(b(function(k){
+this.originalStyle[k]=this.element.style[k];
+},this),["top","left","width","height","fontSize"]);
+this.originalTop=this.element.offsetTop;
+this.originalLeft=this.element.offsetLeft;
+var _694=MochiKit.Style.getStyle(this.element,"font-size")||"100%";
+ma(b(function(_695){
+if(_694.indexOf(_695)>0){
+this.fontSize=parseFloat(_694);
+this.fontSizeType=_695;
+}
+},this),["em","px","%"]);
+this.factor=(this.options.scaleTo-this.options.scaleFrom)/100;
+if(/^content/.test(this.options.scaleMode)){
+this.dims=[this.element.scrollHeight,this.element.scrollWidth];
+}else{
+if(this.options.scaleMode=="box"){
+this.dims=[this.element.offsetHeight,this.element.offsetWidth];
+}else{
+this.dims=[this.options.scaleMode.originalHeight,this.options.scaleMode.originalWidth];
+}
+}
+},update:function(_696){
+var _697=(this.options.scaleFrom/100)+(this.factor*_696);
+if(this.options.scaleContent&&this.fontSize){
+MochiKit.Style.setStyle(this.element,{fontSize:this.fontSize*_697+this.fontSizeType});
+}
+this.setDimensions(this.dims[0]*_697,this.dims[1]*_697);
+},finish:function(){
+if(this.restoreAfterFinish){
+MochiKit.Style.setStyle(this.element,this.originalStyle);
+}
+},setDimensions:function(_698,_699){
+var d={};
+var r=Math.round;
+if(/MSIE/.test(navigator.userAgent)){
+r=Math.ceil;
+}
+if(this.options.scaleX){
+d.width=r(_699)+"px";
+}
+if(this.options.scaleY){
+d.height=r(_698)+"px";
+}
+if(this.options.scaleFromCenter){
+var topd=(_698-this.dims[0])/2;
+var _69d=(_699-this.dims[1])/2;
+if(this.elementPositioning=="absolute"){
+if(this.options.scaleY){
+d.top=this.originalTop-topd+"px";
+}
+if(this.options.scaleX){
+d.left=this.originalLeft-_69d+"px";
+}
+}else{
+if(this.options.scaleY){
+d.top=-topd+"px";
+}
+if(this.options.scaleX){
+d.left=-_69d+"px";
+}
+}
+}
+MochiKit.Style.setStyle(this.element,d);
+}});
+MochiKit.Visual.Highlight=function(_69e,_69f){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_69e,_69f);
+}
+this.__init__(_69e,_69f);
+};
+MochiKit.Visual.Highlight.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.Highlight.prototype,{__class__:MochiKit.Visual.Highlight,__init__:function(_6a1,_6a2){
+this.element=MochiKit.DOM.getElement(_6a1);
+_6a2=MochiKit.Base.update({startcolor:"#ffff99"},_6a2);
+this.start(_6a2);
+},setup:function(){
+var b=MochiKit.Base;
+var s=MochiKit.Style;
+if(s.getStyle(this.element,"display")=="none"){
+this.cancel();
+return;
+}
+this.oldStyle={backgroundImage:s.getStyle(this.element,"background-image")};
+s.setStyle(this.element,{backgroundImage:"none"});
+if(!this.options.endcolor){
+this.options.endcolor=MochiKit.Color.Color.fromBackground(this.element).toHexString();
+}
+if(b.isUndefinedOrNull(this.options.restorecolor)){
+this.options.restorecolor=s.getStyle(this.element,"background-color");
+}
+this._base=b.map(b.bind(function(i){
+return parseInt(this.options.startcolor.slice(i*2+1,i*2+3),16);
+},this),[0,1,2]);
+this._delta=b.map(b.bind(function(i){
+return parseInt(this.options.endcolor.slice(i*2+1,i*2+3),16)-this._base[i];
+},this),[0,1,2]);
+},update:function(_6a7){
+var m="#";
+MochiKit.Base.map(MochiKit.Base.bind(function(i){
+m+=MochiKit.Color.toColorPart(Math.round(this._base[i]+this._delta[i]*_6a7));
+},this),[0,1,2]);
+MochiKit.Style.setStyle(this.element,{backgroundColor:m});
+},finish:function(){
+MochiKit.Style.setStyle(this.element,MochiKit.Base.update(this.oldStyle,{backgroundColor:this.options.restorecolor}));
+}});
+MochiKit.Visual.ScrollTo=function(_6aa,_6ab){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_6aa,_6ab);
+}
+this.__init__(_6aa,_6ab);
+};
+MochiKit.Visual.ScrollTo.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.ScrollTo.prototype,{__class__:MochiKit.Visual.ScrollTo,__init__:function(_6ad,_6ae){
+this.element=MochiKit.DOM.getElement(_6ad);
+this.start(_6ae);
+},setup:function(){
+var p=MochiKit.Position;
+p.prepare();
+var _6b0=p.cumulativeOffset(this.element);
+if(this.options.offset){
+_6b0.y+=this.options.offset;
+}
+var max;
+if(window.innerHeight){
+max=window.innerHeight-window.height;
+}else{
+if(document.documentElement&&document.documentElement.clientHeight){
+max=document.documentElement.clientHeight-document.body.scrollHeight;
+}else{
+if(document.body){
+max=document.body.clientHeight-document.body.scrollHeight;
+}
+}
+}
+this.scrollStart=p.windowOffset.y;
+this.delta=(_6b0.y>max?max:_6b0.y)-this.scrollStart;
+},update:function(_6b2){
+var p=MochiKit.Position;
+p.prepare();
+window.scrollTo(p.windowOffset.x,this.scrollStart+(_6b2*this.delta));
+}});
+MochiKit.Visual.CSS_LENGTH=/^(([\+\-]?[0-9\.]+)(em|ex|px|in|cm|mm|pt|pc|\%))|0$/;
+MochiKit.Visual.Morph=function(_6b4,_6b5){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_6b4,_6b5);
+}
+this.__init__(_6b4,_6b5);
+};
+MochiKit.Visual.Morph.prototype=new MochiKit.Visual.Base();
+MochiKit.Base.update(MochiKit.Visual.Morph.prototype,{__class__:MochiKit.Visual.Morph,__init__:function(_6b7,_6b8){
+this.element=MochiKit.DOM.getElement(_6b7);
+this.start(_6b8);
+},setup:function(){
+var b=MochiKit.Base;
+var _6ba=this.options.style;
+this.styleStart={};
+this.styleEnd={};
+this.units={};
+var _6bb,unit;
+for(var s in _6ba){
+_6bb=_6ba[s];
+s=b.camelize(s);
+if(MochiKit.Visual.CSS_LENGTH.test(_6bb)){
+var _6be=_6bb.match(/^([\+\-]?[0-9\.]+)(.*)$/);
+_6bb=parseFloat(_6be[1]);
+unit=(_6be.length==3)?_6be[2]:null;
+this.styleEnd[s]=_6bb;
+this.units[s]=unit;
+_6bb=MochiKit.Style.getStyle(this.element,s);
+_6be=_6bb.match(/^([\+\-]?[0-9\.]+)(.*)$/);
+_6bb=parseFloat(_6be[1]);
+this.styleStart[s]=_6bb;
+}else{
+if(/[Cc]olor$/.test(s)){
+var c=MochiKit.Color.Color;
+_6bb=c.fromString(_6bb);
+if(_6bb){
+this.units[s]="color";
+this.styleEnd[s]=_6bb.toHexString();
+_6bb=MochiKit.Style.getStyle(this.element,s);
+this.styleStart[s]=c.fromString(_6bb).toHexString();
+this.styleStart[s]=b.map(b.bind(function(i){
+return parseInt(this.styleStart[s].slice(i*2+1,i*2+3),16);
+},this),[0,1,2]);
+this.styleEnd[s]=b.map(b.bind(function(i){
+return parseInt(this.styleEnd[s].slice(i*2+1,i*2+3),16);
+},this),[0,1,2]);
+}
+}else{
+this.element.style[s]=_6bb;
+}
+}
+}
+},update:function(_6c2){
+var _6c3;
+for(var s in this.styleStart){
+if(this.units[s]=="color"){
+var m="#";
+var _6c6=this.styleStart[s];
+var end=this.styleEnd[s];
+MochiKit.Base.map(MochiKit.Base.bind(function(i){
+m+=MochiKit.Color.toColorPart(Math.round(_6c6[i]+(end[i]-_6c6[i])*_6c2));
+},this),[0,1,2]);
+this.element.style[s]=m;
+}else{
+_6c3=this.styleStart[s]+Math.round((this.styleEnd[s]-this.styleStart[s])*_6c2*1000)/1000+this.units[s];
+this.element.style[s]=_6c3;
+}
+}
+}});
+MochiKit.Visual.fade=function(_6c9,_6ca){
+var s=MochiKit.Style;
+var _6cc=s.getStyle(_6c9,"opacity");
+_6ca=MochiKit.Base.update({from:s.getStyle(_6c9,"opacity")||1,to:0,afterFinishInternal:function(_6cd){
+if(_6cd.options.to!==0){
+return;
+}
+s.hideElement(_6cd.element);
+s.setStyle(_6cd.element,{"opacity":_6cc});
+}},_6ca);
+return new MochiKit.Visual.Opacity(_6c9,_6ca);
+};
+MochiKit.Visual.appear=function(_6ce,_6cf){
+var s=MochiKit.Style;
+var v=MochiKit.Visual;
+_6cf=MochiKit.Base.update({from:(s.getStyle(_6ce,"display")=="none"?0:s.getStyle(_6ce,"opacity")||0),to:1,afterFinishInternal:function(_6d2){
+v.forceRerendering(_6d2.element);
+},beforeSetupInternal:function(_6d3){
+s.setStyle(_6d3.element,{"opacity":_6d3.options.from});
+s.showElement(_6d3.element);
+}},_6cf);
+return new v.Opacity(_6ce,_6cf);
+};
+MochiKit.Visual.puff=function(_6d4,_6d5){
+var s=MochiKit.Style;
+var v=MochiKit.Visual;
+_6d4=MochiKit.DOM.getElement(_6d4);
+var _6d8=MochiKit.Style.getElementDimensions(_6d4,true);
+var _6d9={position:s.getStyle(_6d4,"position"),top:_6d4.style.top,left:_6d4.style.left,width:_6d4.style.width,height:_6d4.style.height,opacity:s.getStyle(_6d4,"opacity")};
+_6d5=MochiKit.Base.update({beforeSetupInternal:function(_6da){
+MochiKit.Position.absolutize(_6da.effects[0].element);
+},afterFinishInternal:function(_6db){
+s.hideElement(_6db.effects[0].element);
+s.setStyle(_6db.effects[0].element,_6d9);
+},scaleContent:true,scaleFromCenter:true},_6d5);
+return new v.Parallel([new v.Scale(_6d4,200,{sync:true,scaleFromCenter:_6d5.scaleFromCenter,scaleMode:{originalHeight:_6d8.h,originalWidth:_6d8.w},scaleContent:_6d5.scaleContent,restoreAfterFinish:true}),new v.Opacity(_6d4,{sync:true,to:0})],_6d5);
+};
+MochiKit.Visual.blindUp=function(_6dc,_6dd){
+var d=MochiKit.DOM;
+var s=MochiKit.Style;
+_6dc=d.getElement(_6dc);
+var _6e0=s.getElementDimensions(_6dc,true);
+var _6e1=s.makeClipping(_6dc);
+_6dd=MochiKit.Base.update({scaleContent:false,scaleX:false,scaleMode:{originalHeight:_6e0.h,originalWidth:_6e0.w},restoreAfterFinish:true,afterFinishInternal:function(_6e2){
+s.hideElement(_6e2.element);
+s.undoClipping(_6e2.element,_6e1);
+}},_6dd);
+return new MochiKit.Visual.Scale(_6dc,0,_6dd);
+};
+MochiKit.Visual.blindDown=function(_6e3,_6e4){
+var d=MochiKit.DOM;
+var s=MochiKit.Style;
+_6e3=d.getElement(_6e3);
+var _6e7=s.getElementDimensions(_6e3,true);
+var _6e8;
+_6e4=MochiKit.Base.update({scaleContent:false,scaleX:false,scaleFrom:0,scaleMode:{originalHeight:_6e7.h,originalWidth:_6e7.w},restoreAfterFinish:true,afterSetupInternal:function(_6e9){
+_6e8=s.makeClipping(_6e9.element);
+s.setStyle(_6e9.element,{height:"0px"});
+s.showElement(_6e9.element);
+},afterFinishInternal:function(_6ea){
+s.undoClipping(_6ea.element,_6e8);
+}},_6e4);
+return new MochiKit.Visual.Scale(_6e3,100,_6e4);
+};
+MochiKit.Visual.switchOff=function(_6eb,_6ec){
+var d=MochiKit.DOM;
+var s=MochiKit.Style;
+_6eb=d.getElement(_6eb);
+var _6ef=s.getElementDimensions(_6eb,true);
+var _6f0=s.getStyle(_6eb,"opacity");
+var _6f1;
+_6ec=MochiKit.Base.update({duration:0.7,restoreAfterFinish:true,beforeSetupInternal:function(_6f2){
+s.makePositioned(_6eb);
+_6f1=s.makeClipping(_6eb);
+},afterFinishInternal:function(_6f3){
+s.hideElement(_6eb);
+s.undoClipping(_6eb,_6f1);
+s.undoPositioned(_6eb);
+s.setStyle(_6eb,{"opacity":_6f0});
+}},_6ec);
+var v=MochiKit.Visual;
+return new v.Sequence([new v.appear(_6eb,{sync:true,duration:0.57*_6ec.duration,from:0,transition:v.Transitions.flicker}),new v.Scale(_6eb,1,{sync:true,duration:0.43*_6ec.duration,scaleFromCenter:true,scaleX:false,scaleMode:{originalHeight:_6ef.h,originalWidth:_6ef.w},scaleContent:false,restoreAfterFinish:true})],_6ec);
+};
+MochiKit.Visual.dropOut=function(_6f5,_6f6){
+var d=MochiKit.DOM;
+var s=MochiKit.Style;
+_6f5=d.getElement(_6f5);
+var _6f9={top:s.getStyle(_6f5,"top"),left:s.getStyle(_6f5,"left"),opacity:s.getStyle(_6f5,"opacity")};
+_6f6=MochiKit.Base.update({duration:0.5,distance:100,beforeSetupInternal:function(_6fa){
+s.makePositioned(_6fa.effects[0].element);
+},afterFinishInternal:function(_6fb){
+s.hideElement(_6fb.effects[0].element);
+s.undoPositioned(_6fb.effects[0].element);
+s.setStyle(_6fb.effects[0].element,_6f9);
+}},_6f6);
+var v=MochiKit.Visual;
+return new v.Parallel([new v.Move(_6f5,{x:0,y:_6f6.distance,sync:true}),new v.Opacity(_6f5,{sync:true,to:0})],_6f6);
+};
+MochiKit.Visual.shake=function(_6fd,_6fe){
+var d=MochiKit.DOM;
+var v=MochiKit.Visual;
+var s=MochiKit.Style;
+_6fd=d.getElement(_6fd);
+var _702={top:s.getStyle(_6fd,"top"),left:s.getStyle(_6fd,"left")};
+_6fe=MochiKit.Base.update({duration:0.5,afterFinishInternal:function(_703){
+s.undoPositioned(_6fd);
+s.setStyle(_6fd,_702);
+}},_6fe);
+return new v.Sequence([new v.Move(_6fd,{sync:true,duration:0.1*_6fe.duration,x:20,y:0}),new v.Move(_6fd,{sync:true,duration:0.2*_6fe.duration,x:-40,y:0}),new v.Move(_6fd,{sync:true,duration:0.2*_6fe.duration,x:40,y:0}),new v.Move(_6fd,{sync:true,duration:0.2*_6fe.duration,x:-40,y:0}),new v.Move(_6fd,{sync:true,duration:0.2*_6fe.duration,x:40,y:0}),new v.Move(_6fd,{sync:true,duration:0.1*_6fe.duration,x:-20,y:0})],_6fe);
+};
+MochiKit.Visual.slideDown=function(_704,_705){
+var d=MochiKit.DOM;
+var b=MochiKit.Base;
+var s=MochiKit.Style;
+_704=d.getElement(_704);
+if(!_704.firstChild){
+throw new Error("MochiKit.Visual.slideDown must be used on a element with a child");
+}
+d.removeEmptyTextNodes(_704);
+var _709=s.getStyle(_704.firstChild,"bottom")||0;
+var _70a=s.getElementDimensions(_704,true);
+var _70b;
+_705=b.update({scaleContent:false,scaleX:false,scaleFrom:0,scaleMode:{originalHeight:_70a.h,originalWidth:_70a.w},restoreAfterFinish:true,afterSetupInternal:function(_70c){
+s.makePositioned(_70c.element);
+s.makePositioned(_70c.element.firstChild);
+if(/Opera/.test(navigator.userAgent)){
+s.setStyle(_70c.element,{top:""});
+}
+_70b=s.makeClipping(_70c.element);
+s.setStyle(_70c.element,{height:"0px"});
+s.showElement(_70c.element);
+},afterUpdateInternal:function(_70d){
+var _70e=s.getElementDimensions(_70d.element,true);
+s.setStyle(_70d.element.firstChild,{bottom:(_70d.dims[0]-_70e.h)+"px"});
+},afterFinishInternal:function(_70f){
+s.undoClipping(_70f.element,_70b);
+if(/MSIE/.test(navigator.userAgent)){
+s.undoPositioned(_70f.element);
+s.undoPositioned(_70f.element.firstChild);
+}else{
+s.undoPositioned(_70f.element.firstChild);
+s.undoPositioned(_70f.element);
+}
+s.setStyle(_70f.element.firstChild,{bottom:_709});
+}},_705);
+return new MochiKit.Visual.Scale(_704,100,_705);
+};
+MochiKit.Visual.slideUp=function(_710,_711){
+var d=MochiKit.DOM;
+var b=MochiKit.Base;
+var s=MochiKit.Style;
+_710=d.getElement(_710);
+if(!_710.firstChild){
+throw new Error("MochiKit.Visual.slideUp must be used on a element with a child");
+}
+d.removeEmptyTextNodes(_710);
+var _715=s.getStyle(_710.firstChild,"bottom");
+var _716=s.getElementDimensions(_710,true);
+var _717;
+_711=b.update({scaleContent:false,scaleX:false,scaleMode:{originalHeight:_716.h,originalWidth:_716.w},scaleFrom:100,restoreAfterFinish:true,beforeStartInternal:function(_718){
+s.makePositioned(_718.element);
+s.makePositioned(_718.element.firstChild);
+if(/Opera/.test(navigator.userAgent)){
+s.setStyle(_718.element,{top:""});
+}
+_717=s.makeClipping(_718.element);
+s.showElement(_718.element);
+},afterUpdateInternal:function(_719){
+var _71a=s.getElementDimensions(_719.element,true);
+s.setStyle(_719.element.firstChild,{bottom:(_719.dims[0]-_71a.h)+"px"});
+},afterFinishInternal:function(_71b){
+s.hideElement(_71b.element);
+s.undoClipping(_71b.element,_717);
+s.undoPositioned(_71b.element.firstChild);
+s.undoPositioned(_71b.element);
+s.setStyle(_71b.element.firstChild,{bottom:_715});
+}},_711);
+return new MochiKit.Visual.Scale(_710,0,_711);
+};
+MochiKit.Visual.squish=function(_71c,_71d){
+var d=MochiKit.DOM;
+var b=MochiKit.Base;
+var s=MochiKit.Style;
+var _721=s.getElementDimensions(_71c,true);
+var _722;
+_71d=b.update({restoreAfterFinish:true,scaleMode:{originalHeight:_721.w,originalWidth:_721.h},beforeSetupInternal:function(_723){
+_722=s.makeClipping(_723.element);
+},afterFinishInternal:function(_724){
+s.hideElement(_724.element);
+s.undoClipping(_724.element,_722);
+}},_71d);
+return new MochiKit.Visual.Scale(_71c,/Opera/.test(navigator.userAgent)?1:0,_71d);
+};
+MochiKit.Visual.grow=function(_725,_726){
+var d=MochiKit.DOM;
+var v=MochiKit.Visual;
+var s=MochiKit.Style;
+_725=d.getElement(_725);
+_726=MochiKit.Base.update({direction:"center",moveTransition:v.Transitions.sinoidal,scaleTransition:v.Transitions.sinoidal,opacityTransition:v.Transitions.full,scaleContent:true,scaleFromCenter:false},_726);
+var _72a={top:_725.style.top,left:_725.style.left,height:_725.style.height,width:_725.style.width,opacity:s.getStyle(_725,"opacity")};
+var dims=s.getElementDimensions(_725,true);
+var _72c,_72d;
+var _72e,_72f;
+switch(_726.direction){
+case "top-left":
+_72c=_72d=_72e=_72f=0;
+break;
+case "top-right":
+_72c=dims.w;
+_72d=_72f=0;
+_72e=-dims.w;
+break;
+case "bottom-left":
+_72c=_72e=0;
+_72d=dims.h;
+_72f=-dims.h;
+break;
+case "bottom-right":
+_72c=dims.w;
+_72d=dims.h;
+_72e=-dims.w;
+_72f=-dims.h;
+break;
+case "center":
+_72c=dims.w/2;
+_72d=dims.h/2;
+_72e=-dims.w/2;
+_72f=-dims.h/2;
+break;
+}
+var _730=MochiKit.Base.update({beforeSetupInternal:function(_731){
+s.setStyle(_731.effects[0].element,{height:"0px"});
+s.showElement(_731.effects[0].element);
+},afterFinishInternal:function(_732){
+s.undoClipping(_732.effects[0].element);
+s.undoPositioned(_732.effects[0].element);
+s.setStyle(_732.effects[0].element,_72a);
+}},_726);
+return new v.Move(_725,{x:_72c,y:_72d,duration:0.01,beforeSetupInternal:function(_733){
+s.hideElement(_733.element);
+s.makeClipping(_733.element);
+s.makePositioned(_733.element);
+},afterFinishInternal:function(_734){
+new v.Parallel([new v.Opacity(_734.element,{sync:true,to:1,from:0,transition:_726.opacityTransition}),new v.Move(_734.element,{x:_72e,y:_72f,sync:true,transition:_726.moveTransition}),new v.Scale(_734.element,100,{scaleMode:{originalHeight:dims.h,originalWidth:dims.w},sync:true,scaleFrom:/Opera/.test(navigator.userAgent)?1:0,transition:_726.scaleTransition,scaleContent:_726.scaleContent,scaleFromCenter:_726.scaleFromCenter,restoreAfterFinish:true})],_730);
+}});
+};
+MochiKit.Visual.shrink=function(_735,_736){
+var d=MochiKit.DOM;
+var v=MochiKit.Visual;
+var s=MochiKit.Style;
+_735=d.getElement(_735);
+_736=MochiKit.Base.update({direction:"center",moveTransition:v.Transitions.sinoidal,scaleTransition:v.Transitions.sinoidal,opacityTransition:v.Transitions.none,scaleContent:true,scaleFromCenter:false},_736);
+var _73a={top:_735.style.top,left:_735.style.left,height:_735.style.height,width:_735.style.width,opacity:s.getStyle(_735,"opacity")};
+var dims=s.getElementDimensions(_735,true);
+var _73c,_73d;
+switch(_736.direction){
+case "top-left":
+_73c=_73d=0;
+break;
+case "top-right":
+_73c=dims.w;
+_73d=0;
+break;
+case "bottom-left":
+_73c=0;
+_73d=dims.h;
+break;
+case "bottom-right":
+_73c=dims.w;
+_73d=dims.h;
+break;
+case "center":
+_73c=dims.w/2;
+_73d=dims.h/2;
+break;
+}
+var _73e;
+var _73f=MochiKit.Base.update({beforeStartInternal:function(_740){
+s.makePositioned(_740.effects[0].element);
+_73e=s.makeClipping(_740.effects[0].element);
+},afterFinishInternal:function(_741){
+s.hideElement(_741.effects[0].element);
+s.undoClipping(_741.effects[0].element,_73e);
+s.undoPositioned(_741.effects[0].element);
+s.setStyle(_741.effects[0].element,_73a);
+}},_736);
+return new v.Parallel([new v.Opacity(_735,{sync:true,to:0,from:1,transition:_736.opacityTransition}),new v.Scale(_735,/Opera/.test(navigator.userAgent)?1:0,{scaleMode:{originalHeight:dims.h,originalWidth:dims.w},sync:true,transition:_736.scaleTransition,scaleContent:_736.scaleContent,scaleFromCenter:_736.scaleFromCenter,restoreAfterFinish:true}),new v.Move(_735,{x:_73c,y:_73d,sync:true,transition:_736.moveTransition})],_73f);
+};
+MochiKit.Visual.pulsate=function(_742,_743){
+var d=MochiKit.DOM;
+var v=MochiKit.Visual;
+var b=MochiKit.Base;
+var _747=MochiKit.Style.getStyle(_742,"opacity");
+_743=b.update({duration:3,from:0,afterFinishInternal:function(_748){
+MochiKit.Style.setStyle(_748.element,{"opacity":_747});
+}},_743);
+var _749=_743.transition||v.Transitions.sinoidal;
+_743.transition=function(pos){
+return _749(1-v.Transitions.pulse(pos,_743.pulses));
+};
+return new v.Opacity(_742,_743);
+};
+MochiKit.Visual.fold=function(_74b,_74c){
+var d=MochiKit.DOM;
+var v=MochiKit.Visual;
+var s=MochiKit.Style;
+_74b=d.getElement(_74b);
+var _750=s.getElementDimensions(_74b,true);
+var _751={top:_74b.style.top,left:_74b.style.left,width:_74b.style.width,height:_74b.style.height};
+var _752=s.makeClipping(_74b);
+_74c=MochiKit.Base.update({scaleContent:false,scaleX:false,scaleMode:{originalHeight:_750.h,originalWidth:_750.w},afterFinishInternal:function(_753){
+new v.Scale(_74b,1,{scaleContent:false,scaleY:false,scaleMode:{originalHeight:_750.h,originalWidth:_750.w},afterFinishInternal:function(_754){
+s.hideElement(_754.element);
+s.undoClipping(_754.element,_752);
+s.setStyle(_754.element,_751);
+}});
+}},_74c);
+return new v.Scale(_74b,5,_74c);
+};
+MochiKit.Visual.Color=MochiKit.Color.Color;
+MochiKit.Visual.getElementsComputedStyle=MochiKit.DOM.computedStyle;
+MochiKit.Visual.__new__=function(){
+var m=MochiKit.Base;
+m.nameFunctions(this);
+this.EXPORT_TAGS={":common":this.EXPORT,":all":m.concat(this.EXPORT,this.EXPORT_OK)};
+};
+MochiKit.Visual.EXPORT=["roundElement","roundClass","tagifyText","multiple","toggle","Parallel","Sequence","Opacity","Move","Scale","Highlight","ScrollTo","Morph","fade","appear","puff","blindUp","blindDown","switchOff","dropOut","shake","slideDown","slideUp","squish","grow","shrink","pulsate","fold"];
+MochiKit.Visual.EXPORT_OK=["Base","PAIRS"];
+MochiKit.Visual.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.Visual);
+MochiKit.Base._deps("DragAndDrop",["Base","Iter","DOM","Signal","Visual","Position"]);
+MochiKit.DragAndDrop.NAME="MochiKit.DragAndDrop";
+MochiKit.DragAndDrop.VERSION="1.4.2";
+MochiKit.DragAndDrop.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.DragAndDrop.toString=function(){
+return this.__repr__();
+};
+MochiKit.DragAndDrop.EXPORT=["Droppable","Draggable"];
+MochiKit.DragAndDrop.EXPORT_OK=["Droppables","Draggables"];
+MochiKit.DragAndDrop.Droppables={drops:[],remove:function(_756){
+this.drops=MochiKit.Base.filter(function(d){
+return d.element!=MochiKit.DOM.getElement(_756);
+},this.drops);
+},register:function(drop){
+this.drops.push(drop);
+},unregister:function(drop){
+this.drops=MochiKit.Base.filter(function(d){
+return d!=drop;
+},this.drops);
+},prepare:function(_75b){
+MochiKit.Base.map(function(drop){
+if(drop.isAccepted(_75b)){
+if(drop.options.activeclass){
+MochiKit.DOM.addElementClass(drop.element,drop.options.activeclass);
+}
+drop.options.onactive(drop.element,_75b);
+}
+},this.drops);
+},findDeepestChild:function(_75d){
+deepest=_75d[0];
+for(i=1;i<_75d.length;++i){
+if(MochiKit.DOM.isChildNode(_75d[i].element,deepest.element)){
+deepest=_75d[i];
+}
+}
+return deepest;
+},show:function(_75e,_75f){
+if(!this.drops.length){
+return;
+}
+var _760=[];
+if(this.last_active){
+this.last_active.deactivate();
+}
+MochiKit.Iter.forEach(this.drops,function(drop){
+if(drop.isAffected(_75e,_75f)){
+_760.push(drop);
+}
+});
+if(_760.length>0){
+drop=this.findDeepestChild(_760);
+MochiKit.Position.within(drop.element,_75e.page.x,_75e.page.y);
+drop.options.onhover(_75f,drop.element,MochiKit.Position.overlap(drop.options.overlap,drop.element));
+drop.activate();
+}
+},fire:function(_762,_763){
+if(!this.last_active){
+return;
+}
+MochiKit.Position.prepare();
+if(this.last_active.isAffected(_762.mouse(),_763)){
+this.last_active.options.ondrop(_763,this.last_active.element,_762);
+}
+},reset:function(_764){
+MochiKit.Base.map(function(drop){
+if(drop.options.activeclass){
+MochiKit.DOM.removeElementClass(drop.element,drop.options.activeclass);
+}
+drop.options.ondesactive(drop.element,_764);
+},this.drops);
+if(this.last_active){
+this.last_active.deactivate();
+}
+}};
+MochiKit.DragAndDrop.Droppable=function(_766,_767){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_766,_767);
+}
+this.__init__(_766,_767);
+};
+MochiKit.DragAndDrop.Droppable.prototype={__class__:MochiKit.DragAndDrop.Droppable,__init__:function(_769,_76a){
+var d=MochiKit.DOM;
+var b=MochiKit.Base;
+this.element=d.getElement(_769);
+this.options=b.update({greedy:true,hoverclass:null,activeclass:null,hoverfunc:b.noop,accept:null,onactive:b.noop,ondesactive:b.noop,onhover:b.noop,ondrop:b.noop,containment:[],tree:false},_76a);
+this.options._containers=[];
+b.map(MochiKit.Base.bind(function(c){
+this.options._containers.push(d.getElement(c));
+},this),this.options.containment);
+MochiKit.Style.makePositioned(this.element);
+MochiKit.DragAndDrop.Droppables.register(this);
+},isContained:function(_76e){
+if(this.options._containers.length){
+var _76f;
+if(this.options.tree){
+_76f=_76e.treeNode;
+}else{
+_76f=_76e.parentNode;
+}
+return MochiKit.Iter.some(this.options._containers,function(c){
+return _76f==c;
+});
+}else{
+return true;
+}
+},isAccepted:function(_771){
+return ((!this.options.accept)||MochiKit.Iter.some(this.options.accept,function(c){
+return MochiKit.DOM.hasElementClass(_771,c);
+}));
+},isAffected:function(_773,_774){
+return ((this.element!=_774)&&this.isContained(_774)&&this.isAccepted(_774)&&MochiKit.Position.within(this.element,_773.page.x,_773.page.y));
+},deactivate:function(){
+if(this.options.hoverclass){
+MochiKit.DOM.removeElementClass(this.element,this.options.hoverclass);
+}
+this.options.hoverfunc(this.element,false);
+MochiKit.DragAndDrop.Droppables.last_active=null;
+},activate:function(){
+if(this.options.hoverclass){
+MochiKit.DOM.addElementClass(this.element,this.options.hoverclass);
+}
+this.options.hoverfunc(this.element,true);
+MochiKit.DragAndDrop.Droppables.last_active=this;
+},destroy:function(){
+MochiKit.DragAndDrop.Droppables.unregister(this);
+},repr:function(){
+return "["+this.__class__.NAME+", options:"+MochiKit.Base.repr(this.options)+"]";
+}};
+MochiKit.DragAndDrop.Draggables={drags:[],register:function(_775){
+if(this.drags.length===0){
+var conn=MochiKit.Signal.connect;
+this.eventMouseUp=conn(document,"onmouseup",this,this.endDrag);
+this.eventMouseMove=conn(document,"onmousemove",this,this.updateDrag);
+this.eventKeypress=conn(document,"onkeypress",this,this.keyPress);
+}
+this.drags.push(_775);
+},unregister:function(_777){
+this.drags=MochiKit.Base.filter(function(d){
+return d!=_777;
+},this.drags);
+if(this.drags.length===0){
+var disc=MochiKit.Signal.disconnect;
+disc(this.eventMouseUp);
+disc(this.eventMouseMove);
+disc(this.eventKeypress);
+}
+},activate:function(_77a){
+window.focus();
+this.activeDraggable=_77a;
+},deactivate:function(){
+this.activeDraggable=null;
+},updateDrag:function(_77b){
+if(!this.activeDraggable){
+return;
+}
+var _77c=_77b.mouse();
+if(this._lastPointer&&(MochiKit.Base.repr(this._lastPointer.page)==MochiKit.Base.repr(_77c.page))){
+return;
+}
+this._lastPointer=_77c;
+this.activeDraggable.updateDrag(_77b,_77c);
+},endDrag:function(_77d){
+if(!this.activeDraggable){
+return;
+}
+this._lastPointer=null;
+this.activeDraggable.endDrag(_77d);
+this.activeDraggable=null;
+},keyPress:function(_77e){
+if(this.activeDraggable){
+this.activeDraggable.keyPress(_77e);
+}
+},notify:function(_77f,_780,_781){
+MochiKit.Signal.signal(this,_77f,_780,_781);
+}};
+MochiKit.DragAndDrop.Draggable=function(_782,_783){
+var cls=arguments.callee;
+if(!(this instanceof cls)){
+return new cls(_782,_783);
+}
+this.__init__(_782,_783);
+};
+MochiKit.DragAndDrop.Draggable.prototype={__class__:MochiKit.DragAndDrop.Draggable,__init__:function(_785,_786){
+var v=MochiKit.Visual;
+var b=MochiKit.Base;
+_786=b.update({handle:false,starteffect:function(_789){
+this._savedOpacity=MochiKit.Style.getStyle(_789,"opacity")||1;
+new v.Opacity(_789,{duration:0.2,from:this._savedOpacity,to:0.7});
+},reverteffect:function(_78a,_78b,_78c){
+var dur=Math.sqrt(Math.abs(_78b^2)+Math.abs(_78c^2))*0.02;
+return new v.Move(_78a,{x:-_78c,y:-_78b,duration:dur});
+},endeffect:function(_78e){
+new v.Opacity(_78e,{duration:0.2,from:0.7,to:this._savedOpacity});
+},onchange:b.noop,zindex:1000,revert:false,scroll:false,scrollSensitivity:20,scrollSpeed:15,snap:false},_786);
+var d=MochiKit.DOM;
+this.element=d.getElement(_785);
+if(_786.handle&&(typeof (_786.handle)=="string")){
+this.handle=d.getFirstElementByTagAndClassName(null,_786.handle,this.element);
+}
+if(!this.handle){
+this.handle=d.getElement(_786.handle);
+}
+if(!this.handle){
+this.handle=this.element;
+}
+if(_786.scroll&&!_786.scroll.scrollTo&&!_786.scroll.outerHTML){
+_786.scroll=d.getElement(_786.scroll);
+this._isScrollChild=MochiKit.DOM.isChildNode(this.element,_786.scroll);
+}
+MochiKit.Style.makePositioned(this.element);
+this.delta=this.currentDelta();
+this.options=_786;
+this.dragging=false;
+this.eventMouseDown=MochiKit.Signal.connect(this.handle,"onmousedown",this,this.initDrag);
+MochiKit.DragAndDrop.Draggables.register(this);
+},destroy:function(){
+MochiKit.Signal.disconnect(this.eventMouseDown);
+MochiKit.DragAndDrop.Draggables.unregister(this);
+},currentDelta:function(){
+var s=MochiKit.Style.getStyle;
+return [parseInt(s(this.element,"left")||"0"),parseInt(s(this.element,"top")||"0")];
+},initDrag:function(_791){
+if(!_791.mouse().button.left){
+return;
+}
+var src=_791.target();
+var _793=(src.tagName||"").toUpperCase();
+if(_793==="INPUT"||_793==="SELECT"||_793==="OPTION"||_793==="BUTTON"||_793==="TEXTAREA"){
+return;
+}
+if(this._revert){
+this._revert.cancel();
+this._revert=null;
+}
+var _794=_791.mouse();
+var pos=MochiKit.Position.cumulativeOffset(this.element);
+this.offset=[_794.page.x-pos.x,_794.page.y-pos.y];
+MochiKit.DragAndDrop.Draggables.activate(this);
+_791.stop();
+},startDrag:function(_796){
+this.dragging=true;
+if(this.options.selectclass){
+MochiKit.DOM.addElementClass(this.element,this.options.selectclass);
+}
+if(this.options.zindex){
+this.originalZ=parseInt(MochiKit.Style.getStyle(this.element,"z-index")||"0");
+this.element.style.zIndex=this.options.zindex;
+}
+if(this.options.ghosting){
+this._clone=this.element.cloneNode(true);
+this.ghostPosition=MochiKit.Position.absolutize(this.element);
+this.element.parentNode.insertBefore(this._clone,this.element);
+}
+if(this.options.scroll){
+if(this.options.scroll==window){
+var _797=this._getWindowScroll(this.options.scroll);
+this.originalScrollLeft=_797.left;
+this.originalScrollTop=_797.top;
+}else{
+this.originalScrollLeft=this.options.scroll.scrollLeft;
+this.originalScrollTop=this.options.scroll.scrollTop;
+}
+}
+MochiKit.DragAndDrop.Droppables.prepare(this.element);
+MochiKit.DragAndDrop.Draggables.notify("start",this,_796);
+if(this.options.starteffect){
+this.options.starteffect(this.element);
+}
+},updateDrag:function(_798,_799){
+if(!this.dragging){
+this.startDrag(_798);
+}
+MochiKit.Position.prepare();
+MochiKit.DragAndDrop.Droppables.show(_799,this.element);
+MochiKit.DragAndDrop.Draggables.notify("drag",this,_798);
+this.draw(_799);
+this.options.onchange(this);
+if(this.options.scroll){
+this.stopScrolling();
+var p,q;
+if(this.options.scroll==window){
+var s=this._getWindowScroll(this.options.scroll);
+p=new MochiKit.Style.Coordinates(s.left,s.top);
+q=new MochiKit.Style.Coordinates(s.left+s.width,s.top+s.height);
+}else{
+p=MochiKit.Position.page(this.options.scroll);
+p.x+=this.options.scroll.scrollLeft;
+p.y+=this.options.scroll.scrollTop;
+p.x+=(window.pageXOffset||document.documentElement.scrollLeft||document.body.scrollLeft||0);
+p.y+=(window.pageYOffset||document.documentElement.scrollTop||document.body.scrollTop||0);
+q=new MochiKit.Style.Coordinates(p.x+this.options.scroll.offsetWidth,p.y+this.options.scroll.offsetHeight);
+}
+var _79d=[0,0];
+if(_799.page.x>(q.x-this.options.scrollSensitivity)){
+_79d[0]=_799.page.x-(q.x-this.options.scrollSensitivity);
+}else{
+if(_799.page.x<(p.x+this.options.scrollSensitivity)){
+_79d[0]=_799.page.x-(p.x+this.options.scrollSensitivity);
+}
+}
+if(_799.page.y>(q.y-this.options.scrollSensitivity)){
+_79d[1]=_799.page.y-(q.y-this.options.scrollSensitivity);
+}else{
+if(_799.page.y<(p.y+this.options.scrollSensitivity)){
+_79d[1]=_799.page.y-(p.y+this.options.scrollSensitivity);
+}
+}
+this.startScrolling(_79d);
+}
+if(/AppleWebKit/.test(navigator.appVersion)){
+window.scrollBy(0,0);
+}
+_798.stop();
+},finishDrag:function(_79e,_79f){
+var dr=MochiKit.DragAndDrop;
+this.dragging=false;
+if(this.options.selectclass){
+MochiKit.DOM.removeElementClass(this.element,this.options.selectclass);
+}
+if(this.options.ghosting){
+MochiKit.Position.relativize(this.element,this.ghostPosition);
+MochiKit.DOM.removeElement(this._clone);
+this._clone=null;
+}
+if(_79f){
+dr.Droppables.fire(_79e,this.element);
+}
+dr.Draggables.notify("end",this,_79e);
+var _7a1=this.options.revert;
+if(_7a1&&typeof (_7a1)=="function"){
+_7a1=_7a1(this.element);
+}
+var d=this.currentDelta();
+if(_7a1&&this.options.reverteffect){
+this._revert=this.options.reverteffect(this.element,d[1]-this.delta[1],d[0]-this.delta[0]);
+}else{
+this.delta=d;
+}
+if(this.options.zindex){
+this.element.style.zIndex=this.originalZ;
+}
+if(this.options.endeffect){
+this.options.endeffect(this.element);
+}
+dr.Draggables.deactivate();
+dr.Droppables.reset(this.element);
+},keyPress:function(_7a3){
+if(_7a3.key().string!="KEY_ESCAPE"){
+return;
+}
+this.finishDrag(_7a3,false);
+_7a3.stop();
+},endDrag:function(_7a4){
+if(!this.dragging){
+return;
+}
+this.stopScrolling();
+this.finishDrag(_7a4,true);
+_7a4.stop();
+},draw:function(_7a5){
+var pos=MochiKit.Position.cumulativeOffset(this.element);
+var d=this.currentDelta();
+pos.x-=d[0];
+pos.y-=d[1];
+if(this.options.scroll&&(this.options.scroll!=window&&this._isScrollChild)){
+pos.x-=this.options.scroll.scrollLeft-this.originalScrollLeft;
+pos.y-=this.options.scroll.scrollTop-this.originalScrollTop;
+}
+var p=[_7a5.page.x-pos.x-this.offset[0],_7a5.page.y-pos.y-this.offset[1]];
+if(this.options.snap){
+if(typeof (this.options.snap)=="function"){
+p=this.options.snap(p[0],p[1]);
+}else{
+if(this.options.snap instanceof Array){
+var i=-1;
+p=MochiKit.Base.map(MochiKit.Base.bind(function(v){
+i+=1;
+return Math.round(v/this.options.snap[i])*this.options.snap[i];
+},this),p);
+}else{
+p=MochiKit.Base.map(MochiKit.Base.bind(function(v){
+return Math.round(v/this.options.snap)*this.options.snap;
+},this),p);
+}
+}
+}
+var _7ac=this.element.style;
+if((!this.options.constraint)||(this.options.constraint=="horizontal")){
+_7ac.left=p[0]+"px";
+}
+if((!this.options.constraint)||(this.options.constraint=="vertical")){
+_7ac.top=p[1]+"px";
+}
+if(_7ac.visibility=="hidden"){
+_7ac.visibility="";
+}
+},stopScrolling:function(){
+if(this.scrollInterval){
+clearInterval(this.scrollInterval);
+this.scrollInterval=null;
+MochiKit.DragAndDrop.Draggables._lastScrollPointer=null;
+}
+},startScrolling:function(_7ad){
+if(!_7ad[0]&&!_7ad[1]){
+return;
+}
+this.scrollSpeed=[_7ad[0]*this.options.scrollSpeed,_7ad[1]*this.options.scrollSpeed];
+this.lastScrolled=new Date();
+this.scrollInterval=setInterval(MochiKit.Base.bind(this.scroll,this),10);
+},scroll:function(){
+var _7ae=new Date();
+var _7af=_7ae-this.lastScrolled;
+this.lastScrolled=_7ae;
+if(this.options.scroll==window){
+var s=this._getWindowScroll(this.options.scroll);
+if(this.scrollSpeed[0]||this.scrollSpeed[1]){
+var dm=_7af/1000;
+this.options.scroll.scrollTo(s.left+dm*this.scrollSpeed[0],s.top+dm*this.scrollSpeed[1]);
+}
+}else{
+this.options.scroll.scrollLeft+=this.scrollSpeed[0]*_7af/1000;
+this.options.scroll.scrollTop+=this.scrollSpeed[1]*_7af/1000;
+}
+var d=MochiKit.DragAndDrop;
+MochiKit.Position.prepare();
+d.Droppables.show(d.Draggables._lastPointer,this.element);
+d.Draggables.notify("drag",this);
+if(this._isScrollChild){
+d.Draggables._lastScrollPointer=d.Draggables._lastScrollPointer||d.Draggables._lastPointer;
+d.Draggables._lastScrollPointer.x+=this.scrollSpeed[0]*_7af/1000;
+d.Draggables._lastScrollPointer.y+=this.scrollSpeed[1]*_7af/1000;
+if(d.Draggables._lastScrollPointer.x<0){
+d.Draggables._lastScrollPointer.x=0;
+}
+if(d.Draggables._lastScrollPointer.y<0){
+d.Draggables._lastScrollPointer.y=0;
+}
+this.draw(d.Draggables._lastScrollPointer);
+}
+this.options.onchange(this);
+},_getWindowScroll:function(win){
+var vp,w,h;
+MochiKit.DOM.withWindow(win,function(){
+vp=MochiKit.Style.getViewportPosition(win.document);
+});
+if(win.innerWidth){
+w=win.innerWidth;
+h=win.innerHeight;
+}else{
+if(win.document.documentElement&&win.document.documentElement.clientWidth){
+w=win.document.documentElement.clientWidth;
+h=win.document.documentElement.clientHeight;
+}else{
+w=win.document.body.offsetWidth;
+h=win.document.body.offsetHeight;
+}
+}
+return {top:vp.y,left:vp.x,width:w,height:h};
+},repr:function(){
+return "["+this.__class__.NAME+", options:"+MochiKit.Base.repr(this.options)+"]";
+}};
+MochiKit.DragAndDrop.__new__=function(){
+MochiKit.Base.nameFunctions(this);
+this.EXPORT_TAGS={":common":this.EXPORT,":all":MochiKit.Base.concat(this.EXPORT,this.EXPORT_OK)};
+};
+MochiKit.DragAndDrop.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.DragAndDrop);
+MochiKit.Base._deps("Sortable",["Base","Iter","DOM","Position","DragAndDrop"]);
+MochiKit.Sortable.NAME="MochiKit.Sortable";
+MochiKit.Sortable.VERSION="1.4.2";
+MochiKit.Sortable.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.Sortable.toString=function(){
+return this.__repr__();
+};
+MochiKit.Sortable.EXPORT=[];
+MochiKit.Sortable.EXPORT_OK=[];
+MochiKit.Base.update(MochiKit.Sortable,{sortables:{},_findRootElement:function(_7b7){
+while(_7b7.tagName.toUpperCase()!="BODY"){
+if(_7b7.id&&MochiKit.Sortable.sortables[_7b7.id]){
+return _7b7;
+}
+_7b7=_7b7.parentNode;
+}
+},_createElementId:function(_7b8){
+if(_7b8.id==null||_7b8.id==""){
+var d=MochiKit.DOM;
+var id;
+var _7bb=1;
+while(d.getElement(id="sortable"+_7bb)!=null){
+_7bb+=1;
+}
+d.setNodeAttribute(_7b8,"id",id);
+}
+},options:function(_7bc){
+_7bc=MochiKit.Sortable._findRootElement(MochiKit.DOM.getElement(_7bc));
+if(!_7bc){
+return;
+}
+return MochiKit.Sortable.sortables[_7bc.id];
+},destroy:function(_7bd){
+var s=MochiKit.Sortable.options(_7bd);
+var b=MochiKit.Base;
+var d=MochiKit.DragAndDrop;
+if(s){
+MochiKit.Signal.disconnect(s.startHandle);
+MochiKit.Signal.disconnect(s.endHandle);
+b.map(function(dr){
+d.Droppables.remove(dr);
+},s.droppables);
+b.map(function(dr){
+dr.destroy();
+},s.draggables);
+delete MochiKit.Sortable.sortables[s.element.id];
+}
+},create:function(_7c3,_7c4){
+_7c3=MochiKit.DOM.getElement(_7c3);
+var self=MochiKit.Sortable;
+self._createElementId(_7c3);
+_7c4=MochiKit.Base.update({element:_7c3,tag:"li",dropOnEmpty:false,tree:false,treeTag:"ul",overlap:"vertical",constraint:"vertical",containment:[_7c3],handle:false,only:false,hoverclass:null,ghosting:false,scroll:false,scrollSensitivity:20,scrollSpeed:15,format:/^[^_]*_(.*)$/,onChange:MochiKit.Base.noop,onUpdate:MochiKit.Base.noop,accept:null},_7c4);
+self.destroy(_7c3);
+var _7c6={revert:true,ghosting:_7c4.ghosting,scroll:_7c4.scroll,scrollSensitivity:_7c4.scrollSensitivity,scrollSpeed:_7c4.scrollSpeed,constraint:_7c4.constraint,handle:_7c4.handle};
+if(_7c4.starteffect){
+_7c6.starteffect=_7c4.starteffect;
+}
+if(_7c4.reverteffect){
+_7c6.reverteffect=_7c4.reverteffect;
+}else{
+if(_7c4.ghosting){
+_7c6.reverteffect=function(_7c7){
+_7c7.style.top=0;
+_7c7.style.left=0;
+};
+}
+}
+if(_7c4.endeffect){
+_7c6.endeffect=_7c4.endeffect;
+}
+if(_7c4.zindex){
+_7c6.zindex=_7c4.zindex;
+}
+var _7c8={overlap:_7c4.overlap,containment:_7c4.containment,hoverclass:_7c4.hoverclass,onhover:self.onHover,tree:_7c4.tree,accept:_7c4.accept};
+var _7c9={onhover:self.onEmptyHover,overlap:_7c4.overlap,containment:_7c4.containment,hoverclass:_7c4.hoverclass,accept:_7c4.accept};
+MochiKit.DOM.removeEmptyTextNodes(_7c3);
+_7c4.draggables=[];
+_7c4.droppables=[];
+if(_7c4.dropOnEmpty||_7c4.tree){
+new MochiKit.DragAndDrop.Droppable(_7c3,_7c9);
+_7c4.droppables.push(_7c3);
+}
+MochiKit.Base.map(function(e){
+var _7cb=_7c4.handle?MochiKit.DOM.getFirstElementByTagAndClassName(null,_7c4.handle,e):e;
+_7c4.draggables.push(new MochiKit.DragAndDrop.Draggable(e,MochiKit.Base.update(_7c6,{handle:_7cb})));
+new MochiKit.DragAndDrop.Droppable(e,_7c8);
+if(_7c4.tree){
+e.treeNode=_7c3;
+}
+_7c4.droppables.push(e);
+},(self.findElements(_7c3,_7c4)||[]));
+if(_7c4.tree){
+MochiKit.Base.map(function(e){
+new MochiKit.DragAndDrop.Droppable(e,_7c9);
+e.treeNode=_7c3;
+_7c4.droppables.push(e);
+},(self.findTreeElements(_7c3,_7c4)||[]));
+}
+self.sortables[_7c3.id]=_7c4;
+_7c4.lastValue=self.serialize(_7c3);
+_7c4.startHandle=MochiKit.Signal.connect(MochiKit.DragAndDrop.Draggables,"start",MochiKit.Base.partial(self.onStart,_7c3));
+_7c4.endHandle=MochiKit.Signal.connect(MochiKit.DragAndDrop.Draggables,"end",MochiKit.Base.partial(self.onEnd,_7c3));
+},onStart:function(_7cd,_7ce){
+var self=MochiKit.Sortable;
+var _7d0=self.options(_7cd);
+_7d0.lastValue=self.serialize(_7d0.element);
+},onEnd:function(_7d1,_7d2){
+var self=MochiKit.Sortable;
+self.unmark();
+var _7d4=self.options(_7d1);
+if(_7d4.lastValue!=self.serialize(_7d4.element)){
+_7d4.onUpdate(_7d4.element);
+}
+},findElements:function(_7d5,_7d6){
+return MochiKit.Sortable.findChildren(_7d5,_7d6.only,_7d6.tree,_7d6.tag);
+},findTreeElements:function(_7d7,_7d8){
+return MochiKit.Sortable.findChildren(_7d7,_7d8.only,_7d8.tree?true:false,_7d8.treeTag);
+},findChildren:function(_7d9,only,_7db,_7dc){
+if(!_7d9.hasChildNodes()){
+return null;
+}
+_7dc=_7dc.toUpperCase();
+if(only){
+only=MochiKit.Base.flattenArray([only]);
+}
+var _7dd=[];
+MochiKit.Base.map(function(e){
+if(e.tagName&&e.tagName.toUpperCase()==_7dc&&(!only||MochiKit.Iter.some(only,function(c){
+return MochiKit.DOM.hasElementClass(e,c);
+}))){
+_7dd.push(e);
+}
+if(_7db){
+var _7e0=MochiKit.Sortable.findChildren(e,only,_7db,_7dc);
+if(_7e0&&_7e0.length>0){
+_7dd=_7dd.concat(_7e0);
+}
+}
+},_7d9.childNodes);
+return _7dd;
+},onHover:function(_7e1,_7e2,_7e3){
+if(MochiKit.DOM.isChildNode(_7e2,_7e1)){
+return;
+}
+var self=MochiKit.Sortable;
+if(_7e3>0.33&&_7e3<0.66&&self.options(_7e2).tree){
+return;
+}else{
+if(_7e3>0.5){
+self.mark(_7e2,"before");
+if(_7e2.previousSibling!=_7e1){
+var _7e5=_7e1.parentNode;
+_7e1.style.visibility="hidden";
+_7e2.parentNode.insertBefore(_7e1,_7e2);
+if(_7e2.parentNode!=_7e5){
+self.options(_7e5).onChange(_7e1);
+}
+self.options(_7e2.parentNode).onChange(_7e1);
+}
+}else{
+self.mark(_7e2,"after");
+var _7e6=_7e2.nextSibling||null;
+if(_7e6!=_7e1){
+var _7e5=_7e1.parentNode;
+_7e1.style.visibility="hidden";
+_7e2.parentNode.insertBefore(_7e1,_7e6);
+if(_7e2.parentNode!=_7e5){
+self.options(_7e5).onChange(_7e1);
+}
+self.options(_7e2.parentNode).onChange(_7e1);
+}
+}
+}
+},_offsetSize:function(_7e7,type){
+if(type=="vertical"||type=="height"){
+return _7e7.offsetHeight;
+}else{
+return _7e7.offsetWidth;
+}
+},onEmptyHover:function(_7e9,_7ea,_7eb){
+var _7ec=_7e9.parentNode;
+var self=MochiKit.Sortable;
+var _7ee=self.options(_7ea);
+if(!MochiKit.DOM.isChildNode(_7ea,_7e9)){
+var _7ef;
+var _7f0=self.findElements(_7ea,{tag:_7ee.tag,only:_7ee.only});
+var _7f1=null;
+if(_7f0){
+var _7f2=self._offsetSize(_7ea,_7ee.overlap)*(1-_7eb);
+for(_7ef=0;_7ef<_7f0.length;_7ef+=1){
+if(_7f2-self._offsetSize(_7f0[_7ef],_7ee.overlap)>=0){
+_7f2-=self._offsetSize(_7f0[_7ef],_7ee.overlap);
+}else{
+if(_7f2-(self._offsetSize(_7f0[_7ef],_7ee.overlap)/2)>=0){
+_7f1=_7ef+1<_7f0.length?_7f0[_7ef+1]:null;
+break;
+}else{
+_7f1=_7f0[_7ef];
+break;
+}
+}
+}
+}
+_7ea.insertBefore(_7e9,_7f1);
+self.options(_7ec).onChange(_7e9);
+_7ee.onChange(_7e9);
+}
+},unmark:function(){
+var m=MochiKit.Sortable._marker;
+if(m){
+MochiKit.Style.hideElement(m);
+}
+},mark:function(_7f4,_7f5){
+var d=MochiKit.DOM;
+var self=MochiKit.Sortable;
+var _7f8=self.options(_7f4.parentNode);
+if(_7f8&&!_7f8.ghosting){
+return;
+}
+if(!self._marker){
+self._marker=d.getElement("dropmarker")||document.createElement("DIV");
+MochiKit.Style.hideElement(self._marker);
+d.addElementClass(self._marker,"dropmarker");
+self._marker.style.position="absolute";
+document.getElementsByTagName("body").item(0).appendChild(self._marker);
+}
+var _7f9=MochiKit.Position.cumulativeOffset(_7f4);
+self._marker.style.left=_7f9.x+"px";
+self._marker.style.top=_7f9.y+"px";
+if(_7f5=="after"){
+if(_7f8.overlap=="horizontal"){
+self._marker.style.left=(_7f9.x+_7f4.clientWidth)+"px";
+}else{
+self._marker.style.top=(_7f9.y+_7f4.clientHeight)+"px";
+}
+}
+MochiKit.Style.showElement(self._marker);
+},_tree:function(_7fa,_7fb,_7fc){
+var self=MochiKit.Sortable;
+var _7fe=self.findElements(_7fa,_7fb)||[];
+for(var i=0;i<_7fe.length;++i){
+var _800=_7fe[i].id.match(_7fb.format);
+if(!_800){
+continue;
+}
+var _801={id:encodeURIComponent(_800?_800[1]:null),element:_7fa,parent:_7fc,children:[],position:_7fc.children.length,container:self._findChildrenElement(_7fe[i],_7fb.treeTag.toUpperCase())};
+if(_801.container){
+self._tree(_801.container,_7fb,_801);
+}
+_7fc.children.push(_801);
+}
+return _7fc;
+},_findChildrenElement:function(_802,_803){
+if(_802&&_802.hasChildNodes){
+_803=_803.toUpperCase();
+for(var i=0;i<_802.childNodes.length;++i){
+if(_802.childNodes[i].tagName.toUpperCase()==_803){
+return _802.childNodes[i];
+}
+}
+}
+return null;
+},tree:function(_805,_806){
+_805=MochiKit.DOM.getElement(_805);
+var _807=MochiKit.Sortable.options(_805);
+_806=MochiKit.Base.update({tag:_807.tag,treeTag:_807.treeTag,only:_807.only,name:_805.id,format:_807.format},_806||{});
+var root={id:null,parent:null,children:new Array,container:_805,position:0};
+return MochiKit.Sortable._tree(_805,_806,root);
+},setSequence:function(_809,_80a,_80b){
+var self=MochiKit.Sortable;
+var b=MochiKit.Base;
+_809=MochiKit.DOM.getElement(_809);
+_80b=b.update(self.options(_809),_80b||{});
+var _80e={};
+b.map(function(n){
+var m=n.id.match(_80b.format);
+if(m){
+_80e[m[1]]=[n,n.parentNode];
+}
+n.parentNode.removeChild(n);
+},self.findElements(_809,_80b));
+b.map(function(_811){
+var n=_80e[_811];
+if(n){
+n[1].appendChild(n[0]);
+delete _80e[_811];
+}
+},_80a);
+},_constructIndex:function(node){
+var _814="";
+do{
+if(node.id){
+_814="["+node.position+"]"+_814;
+}
+}while((node=node.parent)!=null);
+return _814;
+},sequence:function(_815,_816){
+_815=MochiKit.DOM.getElement(_815);
+var self=MochiKit.Sortable;
+var _816=MochiKit.Base.update(self.options(_815),_816||{});
+return MochiKit.Base.map(function(item){
+return item.id.match(_816.format)?item.id.match(_816.format)[1]:"";
+},MochiKit.DOM.getElement(self.findElements(_815,_816)||[]));
+},serialize:function(_819,_81a){
+_819=MochiKit.DOM.getElement(_819);
+var self=MochiKit.Sortable;
+_81a=MochiKit.Base.update(self.options(_819),_81a||{});
+var name=encodeURIComponent(_81a.name||_819.id);
+if(_81a.tree){
+return MochiKit.Base.flattenArray(MochiKit.Base.map(function(item){
+return [name+self._constructIndex(item)+"[id]="+encodeURIComponent(item.id)].concat(item.children.map(arguments.callee));
+},self.tree(_819,_81a).children)).join("&");
+}else{
+return MochiKit.Base.map(function(item){
+return name+"[]="+encodeURIComponent(item);
+},self.sequence(_819,_81a)).join("&");
+}
+}});
+MochiKit.Sortable.Sortable=MochiKit.Sortable;
+MochiKit.Sortable.__new__=function(){
+MochiKit.Base.nameFunctions(this);
+this.EXPORT_TAGS={":common":this.EXPORT,":all":MochiKit.Base.concat(this.EXPORT,this.EXPORT_OK)};
+};
+MochiKit.Sortable.__new__();
+MochiKit.Base._exportSymbols(this,MochiKit.Sortable);
+if(typeof (MochiKit)=="undefined"){
+MochiKit={};
+}
+if(typeof (MochiKit.MochiKit)=="undefined"){
+MochiKit.MochiKit={};
+}
+MochiKit.MochiKit.NAME="MochiKit.MochiKit";
+MochiKit.MochiKit.VERSION="1.4.2";
+MochiKit.MochiKit.__repr__=function(){
+return "["+this.NAME+" "+this.VERSION+"]";
+};
+MochiKit.MochiKit.toString=function(){
+return this.__repr__();
+};
+MochiKit.MochiKit.SUBMODULES=["Base","Iter","Logging","DateTime","Format","Async","DOM","Selector","Style","LoggingPane","Color","Signal","Position","Visual","DragAndDrop","Sortable"];
+if(typeof (JSAN)!="undefined"||typeof (dojo)!="undefined"){
+if(typeof (dojo)!="undefined"){
+dojo.provide("MochiKit.MochiKit");
+(function(lst){
+for(var i=0;i<lst.length;i++){
+dojo.require("MochiKit."+lst[i]);
+}
+})(MochiKit.MochiKit.SUBMODULES);
+}
+if(typeof (JSAN)!="undefined"){
+(function(lst){
+for(var i=0;i<lst.length;i++){
+JSAN.use("MochiKit."+lst[i],[]);
+}
+})(MochiKit.MochiKit.SUBMODULES);
+}
+(function(){
+var _823=MochiKit.Base.extend;
+var self=MochiKit.MochiKit;
+var _825=self.SUBMODULES;
+var _826=[];
+var _827=[];
+var _828={};
+var i,k,m,all;
+for(i=0;i<_825.length;i++){
+m=MochiKit[_825[i]];
+_823(_826,m.EXPORT);
+_823(_827,m.EXPORT_OK);
+for(k in m.EXPORT_TAGS){
+_828[k]=_823(_828[k],m.EXPORT_TAGS[k]);
+}
+all=m.EXPORT_TAGS[":all"];
+if(!all){
+all=_823(null,m.EXPORT,m.EXPORT_OK);
+}
+var j;
+for(j=0;j<all.length;j++){
+k=all[j];
+self[k]=m[k];
+}
+}
+self.EXPORT=_826;
+self.EXPORT_OK=_827;
+self.EXPORT_TAGS=_828;
+}());
+}else{
+if(typeof (MochiKit.__compat__)=="undefined"){
+MochiKit.__compat__=true;
+}
+(function(){
+if(typeof (document)=="undefined"){
+return;
+}
+var _82e=document.getElementsByTagName("script");
+var _82f="http://www.w3.org/1999/xhtml";
+var _830="http://www.w3.org/2000/svg";
+var _831="http://www.w3.org/1999/xlink";
+var _832="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
+var base=null;
+var _834=null;
+var _835={};
+var i;
+var src;
+for(i=0;i<_82e.length;i++){
+src=null;
+switch(_82e[i].namespaceURI){
+case _830:
+src=_82e[i].getAttributeNS(_831,"href");
+break;
+default:
+src=_82e[i].getAttribute("src");
+break;
+}
+if(!src){
+continue;
+}
+_835[src]=true;
+if(src.match(/MochiKit.js(\?.*)?$/)){
+base=src.substring(0,src.lastIndexOf("MochiKit.js"));
+_834=_82e[i];
+}
+}
+if(base===null){
+return;
+}
+var _838=MochiKit.MochiKit.SUBMODULES;
+for(var i=0;i<_838.length;i++){
+if(MochiKit[_838[i]]){
+continue;
+}
+var uri=base+_838[i]+".js";
+if(uri in _835){
+continue;
+}
+if(_834.namespaceURI==_830||_834.namespaceURI==_832){
+var s=document.createElementNS(_834.namespaceURI,"script");
+s.setAttribute("id","MochiKit_"+base+_838[i]);
+if(_834.namespaceURI==_830){
+s.setAttributeNS(_831,"href",uri);
+}else{
+s.setAttribute("src",uri);
+}
+s.setAttribute("type","application/x-javascript");
+_834.parentNode.appendChild(s);
+}else{
+document.write("<"+_834.nodeName+" src=\""+uri+"\" type=\"text/javascript\"></script>");
+}
+}
+})();
+}
+
+
diff --git a/paste/evalexception/media/debug.js b/paste/evalexception/media/debug.js
new file mode 100644
index 0000000..57f9df3
--- /dev/null
+++ b/paste/evalexception/media/debug.js
@@ -0,0 +1,161 @@
/* Toggle the expanded view (source frame + local variables) for one
   traceback frame.  `anchor` is the <a> element carrying a `tbid`
   attribute; the expanded state and the fetched element are cached as
   properties on the anchor itself.  Always returns false so the click
   does not follow the link. */
function showFrame(anchor) {
    var tbid = anchor.getAttribute('tbid');
    var expanded = anchor.expanded;
    if (expanded) {
        // Currently open: just hide the cached element and flip the icon.
        MochiKit.DOM.hideElement(anchor.expandedElement);
        anchor.expanded = false;
        _swapImage(anchor);
        return false;
    }
    anchor.expanded = true;
    if (anchor.expandedElement) {
        // Previously fetched: re-show the cached element, no new request.
        MochiKit.DOM.showElement(anchor.expandedElement);
        _swapImage(anchor);
        $('debug_input_'+tbid).focus();
        return false;
    }
    // First expansion: fetch the frame HTML from the server and insert
    // it into the document right after the anchor.
    var url = debug_base
        + '/show_frame?tbid=' + tbid
        + '&debugcount=' + debug_count;
    var d = MochiKit.Async.doSimpleXMLHttpRequest(url);
    d.addCallbacks(function (data) {
        var el = MochiKit.DOM.DIV({});
        anchor.parentNode.insertBefore(el, anchor.nextSibling);
        el.innerHTML = data.responseText;
        anchor.expandedElement = el;
        _swapImage(anchor);
        $('debug_input_'+tbid).focus();
    }, function (error) {
        showError(error.req.responseText);
    });
    return false;
}
+
/* Update the +/- icon inside `anchor` to reflect its expanded state. */
function _swapImage(anchor) {
    var icon = anchor.getElementsByTagName('IMG')[0];
    icon.src = debug_base + '/media/'
        + (anchor.expanded ? 'minus.jpg' : 'plus.jpg');
}
+
/* Submit the expression typed into the debugger input box for frame
   `tbid` to the server and append the result to the output element.
   `button` carries `input-from`/`output-to` attributes naming the
   elements to read from and write to.  Returns false to suppress the
   normal form submission. */
function submitInput(button, tbid) {
    var input = $(button.getAttribute('input-from'));
    var output = $(button.getAttribute('output-to'));
    var url = debug_base
        + '/exec_input';
    // Remember the submitted line so upArrow() can recall it later.
    var history = input.form.history;
    input.historyPosition = 0;
    if (! history) {
        history = input.form.history = [];
    }
    history.push(input.value);
    var vars = {
        tbid: tbid,
        debugcount: debug_count,
        input: input.value
    };
    MochiKit.DOM.showElement(output);
    var d = MochiKit.Async.doSimpleXMLHttpRequest(url, vars);
    d.addCallbacks(function (data) {
        // Server returns rendered HTML for the evaluated expression;
        // append it and reset the input for the next entry.
        var result = data.responseText;
        output.innerHTML += result;
        input.value = '';
        input.focus();
    }, function (error) {
        showError(error.req.responseText);
    });
    return false;
}
+
/* Append `msg` to the error container (separated by a rule when text is
   already present) and make the error area visible. */
function showError(msg) {
    var container = $('error-container');
    container.innerHTML = container.innerHTML
        ? container.innerHTML + '<hr noshade>\n' + msg
        : msg;
    MochiKit.DOM.showElement('error-area');
}
+
/* Empty the error container and hide the surrounding error area. */
function clearError() {
    $('error-container').innerHTML = '';
    MochiKit.DOM.hideElement('error-area');
}
+
/* Swap the single-line debugger input for a <textarea> (or back again)
   so longer expressions can be edited.  `button` is the
   Expand/Contract button in the same form; its label is updated to
   match.  Returns false to suppress form submission.

   Fix: `stdops` was assigned without `var`, leaking it into the global
   scope; `newEl`/`text` were also declared inside the branches. */
function expandInput(button) {
    var input = button.form.elements.input;
    var stdops = {
        name: 'input',
        style: 'width: 100%',
        autocomplete: 'off'
    };
    var newEl, text;
    if (input.tagName == 'INPUT') {
        newEl = MochiKit.DOM.TEXTAREA(stdops);
        text = 'Contract';
    } else {
        // Going back to a one-line input: restore the attributes the
        // original input element is created with.
        stdops['type'] = 'text';
        stdops['onkeypress'] = 'upArrow(this)';
        newEl = MochiKit.DOM.INPUT(stdops);
        text = 'Expand';
    }
    newEl.value = input.value;
    newEl.id = input.id;
    MochiKit.DOM.swapDOM(input, newEl);
    newEl.focus();
    button.value = text;
    return false;
}
+
/* Keypress handler for the debugger input box: up/down arrows walk the
   per-form submission history (filled in by submitInput()).
   `input.historyPosition` counts backwards from the end of the history
   array; 0 means "not browsing history".  Returns true to let any
   other key through unmodified. */
function upArrow(input, event) {
    if (window.event) {
        // IE delivers the event via the global window.event object.
        event = window.event;
    }
    if (event.keyCode != 38 && event.keyCode != 40) {
        // not an up- or down-arrow
        return true;
    }
    // Up (38) moves further back in history, down (40) moves forward.
    var dir = event.keyCode == 38 ? 1 : -1;
    var history = input.form.history;
    if (! history) {
        history = input.form.history = [];
    }
    var pos = input.historyPosition || 0;
    if (! pos && dir == -1) {
        // Down-arrow while not browsing history: nothing to do.
        return true;
    }
    if (! pos && input.value) {
        // Starting to browse with uncommitted text: stash it first so
        // it is not lost.
        history.push(input.value);
        pos = 1;
    }
    pos += dir;
    if (history.length-pos < 0) {
        // Walked past the oldest entry: clamp to the newest.
        pos = 1;
    }
    if (history.length-pos > history.length-1) {
        // Walked forward past the newest entry: clear the box.
        input.value = '';
        return true;
    }
    input.historyPosition = pos;
    var line = history[history.length-pos];
    input.value = line;
}
+
/* Reveal the hidden tail of a truncated value: find the first hidden
   sibling at or after `anchor` (the "..." link), show it, and hide the
   anchor itself.  Returns false so the link is not followed. */
function expandLong(anchor) {
    var node = anchor;
    while (node && !(node.style && node.style.display == 'none')) {
        node = node.nextSibling;
    }
    if (!node) {
        return false;
    }
    MochiKit.DOM.showElement(node);
    MochiKit.DOM.hideElement(anchor);
    return false;
}
diff --git a/paste/evalexception/media/minus.jpg b/paste/evalexception/media/minus.jpg
new file mode 100644
index 0000000..05f3306
--- /dev/null
+++ b/paste/evalexception/media/minus.jpg
Binary files differ
diff --git a/paste/evalexception/media/plus.jpg b/paste/evalexception/media/plus.jpg
new file mode 100644
index 0000000..a17aa5e
--- /dev/null
+++ b/paste/evalexception/media/plus.jpg
Binary files differ
diff --git a/paste/evalexception/middleware.py b/paste/evalexception/middleware.py
new file mode 100644
index 0000000..4349b88
--- /dev/null
+++ b/paste/evalexception/middleware.py
@@ -0,0 +1,610 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Exception-catching middleware that allows interactive debugging.
+
+This middleware catches all unexpected exceptions. A normal
+traceback, like produced by
+``paste.exceptions.errormiddleware.ErrorMiddleware`` is given, plus
+controls to see local variables and evaluate expressions in a local
+context.
+
+This can only be used in single-process environments, because
+subsequent requests must go back to the same process that the
+exception originally occurred in. Threaded or non-concurrent
+environments both work.
+
+This shouldn't be used in production in any way. That would just be
+silly.
+
+If calling from an XMLHttpRequest call, if the GET variable ``_`` is
+given then it will make the response more compact (and less
+Javascripty), since if you use innerHTML it'll kill your browser. You
+can look for the header X-Debug-URL in your 500 responses if you want
+to see the full debuggable traceback. Also, this URL is printed to
+``wsgi.errors``, so you can open it up in another browser window.
+"""
+import sys
+import os
+import cgi
+import traceback
+from cStringIO import StringIO
+import pprint
+import itertools
+import time
+import re
+from paste.exceptions import errormiddleware, formatter, collector
+from paste import wsgilib
+from paste import urlparser
+from paste import httpexceptions
+from paste import registry
+from paste import request
+from paste import response
+import evalcontext
+
# Maximum number of traceback frames collected per exception (see
# DebugInfo.__init__); None would mean "no limit".
limit = 200
+
def html_quote(v):
    """
    Escape HTML characters, plus translate None to ''

    Non-string values are converted with ``str()`` first; the
    characters ``&``, ``<``, ``>`` and ``"`` are escaped (the same
    replacements ``cgi.escape(s, 1)`` performed -- that function is
    deprecated and was removed from the stdlib in Python 3.8, so the
    replacements are inlined here).
    """
    if v is None:
        return ''
    s = str(v)
    # '&' must be replaced first so the entities below are not mangled.
    s = s.replace('&', '&amp;')
    s = s.replace('<', '&lt;')
    s = s.replace('>', '&gt;')
    s = s.replace('"', '&quot;')
    return s
+
def preserve_whitespace(v, quote=True):
    """
    Quote a value for HTML, preserving whitespace (translating
    newlines to ``<br>`` and multiple spaces to use ``&nbsp;``).

    If ``quote`` is true, then the value will be HTML quoted first.
    """
    text = html_quote(v) if quote else v
    text = text.replace('\n', '<br>\n')
    # Pad space runs in the same order as before: anywhere, after a
    # newline, and at the very start of the string.
    for pattern in (r'()( +)', r'(\n)( +)', r'^()( +)'):
        text = re.sub(pattern, _repl_nbsp, text)
    return '<code>%s</code>' % text
+
+def _repl_nbsp(match):
+ if len(match.group(2)) == 1:
+ return '&nbsp;'
+ return match.group(1) + '&nbsp;' * (len(match.group(2))-1) + ' '
+
def simplecatcher(application):
    """
    A simple middleware that catches errors and turns them into simple
    tracebacks.
    """
    def simplecatcher_app(environ, start_response):
        try:
            return application(environ, start_response)
        except:
            # Render the traceback as a minimal HTML page; passing
            # exc_info lets start_response be called even if the
            # response has already begun.
            buf = StringIO()
            traceback.print_exc(file=buf)
            start_response('500 Server Error',
                           [('content-type', 'text/html')],
                           sys.exc_info())
            tb_text = buf.getvalue()
            return ['<h3>Error</h3><pre>%s</pre>'
                    % html_quote(tb_text)]
    return simplecatcher_app
+
def wsgiapp():
    """
    Turns a function or method into a WSGI application.

    The wrapped callable receives the parsed GET/POST variables as
    keyword arguments, plus ``environ`` and a mutable ``headers`` dict
    (whose 'status' entry controls the response status), and returns
    the response body as a string.
    """
    def decorator(func):
        def wsgiapp_wrapper(*args):
            # we get 3 args when this is a method, two when it is
            # a function :(
            if len(args) == 3:
                environ = args[1]
                start_response = args[2]
                args = [args[0]]
            else:
                environ, start_response = args
                args = []
            def application(environ, start_response):
                form = wsgilib.parse_formvars(environ,
                                              include_get_vars=True)
                headers = response.HeaderDict(
                    {'content-type': 'text/html',
                     'status': '200 OK'})
                form['environ'] = environ
                form['headers'] = headers
                res = func(*args, **form.mixed())
                # 'status' travels in the header dict; pop it so it is
                # not emitted as a literal response header.
                status = headers.pop('status')
                start_response(status, headers.headeritems())
                return [res]
            # Convert HTTPExceptions into responses and catch anything
            # else with a bare-bones traceback page.
            app = httpexceptions.make_middleware(application)
            app = simplecatcher(app)
            return app(environ, start_response)
        wsgiapp_wrapper.exposed = True
        return wsgiapp_wrapper
    return decorator
+
def get_debug_info(func):
    """
    A decorator (meant to be used under ``wsgiapp()``) that resolves
    the ``debugcount`` variable to a ``DebugInfo`` object (or gives an
    error if it can't be found).
    """
    def debug_info_replacement(self, **form):
        try:
            if 'debugcount' not in form:
                raise ValueError('You must provide a debugcount parameter')
            debugcount = form.pop('debugcount')
            try:
                debugcount = int(debugcount)
            except ValueError:
                raise ValueError('Bad value for debugcount')
            if debugcount not in self.debug_infos:
                raise ValueError(
                    'Debug %s no longer found (maybe it has expired?)'
                    % debugcount)
            debug_info = self.debug_infos[debugcount]
            return func(self, debug_info=debug_info, **form)
        except ValueError, e:
            # Report any lookup problem as a 500 page; 'headers' is the
            # mutable header dict supplied by wsgiapp().
            form['headers']['status'] = '500 Server Error'
            return '<html>There was an error: %s</html>' % html_quote(e)
    return debug_info_replacement
+
# Seed the counter with the current time so debug counts remain unique
# across server restarts.
debug_counter = itertools.count(int(time.time()))
def get_debug_count(environ):
    """
    Return the unique debug count for the current request
    """
    if 'paste.evalexception.debug_count' in environ:
        return environ['paste.evalexception.debug_count']
    else:
        # First call for this request: allocate a fresh count and cache
        # it in the environ.
        environ['paste.evalexception.debug_count'] = next = debug_counter.next()
        return next
+
class EvalException(object):
    """
    WSGI middleware that catches unexpected exceptions and serves an
    interactive in-browser debugger for them.

    Debugger endpoints are exposed under the ``/_debug/`` URL prefix;
    collected tracebacks are cached in ``self.debug_infos`` keyed by a
    per-request debug count, which is why this middleware only works in
    a single-process (though possibly threaded) server.
    """

    def __init__(self, application, global_conf=None,
                 xmlhttp_key=None):
        self.application = application
        # Maps debug counts to DebugInfo objects for later viewing.
        self.debug_infos = {}
        if xmlhttp_key is None:
            if global_conf is None:
                xmlhttp_key = '_'
            else:
                xmlhttp_key = global_conf.get('xmlhttp_key', '_')
        self.xmlhttp_key = xmlhttp_key

    def __call__(self, environ, start_response):
        assert not environ['wsgi.multiprocess'], (
            "The EvalException middleware is not usable in a "
            "multi-process environment")
        environ['paste.evalexception'] = self
        if environ.get('PATH_INFO', '').startswith('/_debug/'):
            return self.debug(environ, start_response)
        else:
            return self.respond(environ, start_response)

    def debug(self, environ, start_response):
        # Dispatch /_debug/<name>/... to the exposed method ``name``.
        assert request.path_info_pop(environ) == '_debug'
        next_part = request.path_info_pop(environ)
        method = getattr(self, next_part, None)
        if not method:
            exc = httpexceptions.HTTPNotFound(
                '%r not found when parsing %r'
                % (next_part, wsgilib.construct_url(environ)))
            return exc.wsgi_application(environ, start_response)
        if not getattr(method, 'exposed', False):
            # Only methods explicitly marked .exposed may be reached
            # from a URL.
            exc = httpexceptions.HTTPForbidden(
                '%r not allowed' % next_part)
            return exc.wsgi_application(environ, start_response)
        return method(environ, start_response)

    def media(self, environ, start_response):
        """
        Static path where images and other files live
        """
        app = urlparser.StaticURLParser(
            os.path.join(os.path.dirname(__file__), 'media'))
        return app(environ, start_response)
    media.exposed = True

    def mochikit(self, environ, start_response):
        """
        Static path where MochiKit lives
        """
        app = urlparser.StaticURLParser(
            os.path.join(os.path.dirname(__file__), 'mochikit'))
        return app(environ, start_response)
    mochikit.exposed = True

    def summary(self, environ, start_response):
        """
        Returns a JSON-format summary of all the cached
        exception reports
        """
        start_response('200 OK', [('Content-type', 'text/x-json')])
        items = self.debug_infos.values()
        items.sort(lambda a, b: cmp(a.created, b.created))
        data = [item.json() for item in items]
        return [repr(data)]
    summary.exposed = True

    def view(self, environ, start_response):
        """
        View old exception reports
        """
        id = int(request.path_info_pop(environ))
        if id not in self.debug_infos:
            start_response(
                '500 Server Error',
                [('Content-type', 'text/html')])
            return [
                "Traceback by id %s does not exist (maybe "
                "the server has been restarted?)"
                % id]
        debug_info = self.debug_infos[id]
        return debug_info.wsgi_application(environ, start_response)
    view.exposed = True

    def make_view_url(self, environ, base_path, count):
        return base_path + '/_debug/view/%s' % count

    #@wsgiapp()
    #@get_debug_info
    def show_frame(self, tbid, debug_info, **kw):
        # Render the local variables of one traceback frame, plus the
        # expression-input form, as an HTML fragment.
        frame = debug_info.frame(int(tbid))
        vars = frame.tb_frame.f_locals
        if vars:
            # Restore the registry state captured at exception time so
            # rendering sees the same StackedObjectProxies.
            registry.restorer.restoration_begin(debug_info.counter)
            local_vars = make_table(vars)
            registry.restorer.restoration_end()
        else:
            local_vars = 'No local vars'
        return input_form(tbid, debug_info) + local_vars

    show_frame = wsgiapp()(get_debug_info(show_frame))

    #@wsgiapp()
    #@get_debug_info
    def exec_input(self, tbid, debug_info, input, **kw):
        # Evaluate ``input`` in the context of the given traceback
        # frame and return the rendered >>> prompt plus output.
        if not input.strip():
            return ''
        input = input.rstrip() + '\n'
        frame = debug_info.frame(int(tbid))
        vars = frame.tb_frame.f_locals
        glob_vars = frame.tb_frame.f_globals
        context = evalcontext.EvalContext(vars, glob_vars)
        registry.restorer.restoration_begin(debug_info.counter)
        output = context.exec_expr(input)
        registry.restorer.restoration_end()
        input_html = formatter.str2html(input)
        return ('<code style="color: #060">&gt;&gt;&gt;</code> '
                '<code>%s</code><br>\n%s'
                % (preserve_whitespace(input_html, quote=False),
                   preserve_whitespace(output)))

    exec_input = wsgiapp()(get_debug_info(exec_input))

    def respond(self, environ, start_response):
        if environ.get('paste.throw_errors'):
            return self.application(environ, start_response)
        base_path = request.construct_url(environ, with_path_info=False,
                                          with_query_string=False)
        environ['paste.throw_errors'] = True
        started = []
        def detect_start_response(status, headers, exc_info=None):
            # Record that the response has been started so the error
            # path below knows whether it may still call
            # start_response itself.  (Previously the append was in an
            # ``else:`` clause after a ``return`` inside ``try:``,
            # making it unreachable and leaving ``started`` always
            # empty.)
            result = start_response(status, headers, exc_info)
            started.append(True)
            return result
        try:
            __traceback_supplement__ = errormiddleware.Supplement, self, environ
            app_iter = self.application(environ, detect_start_response)
            try:
                # Consume the iterator eagerly so any exception raised
                # during iteration is caught here.
                return_iter = list(app_iter)
                return return_iter
            finally:
                if hasattr(app_iter, 'close'):
                    app_iter.close()
        except:
            exc_info = sys.exc_info()
            # Expected exceptions are re-raised untouched for outer
            # middleware to handle.
            for expected in environ.get('paste.expected_exceptions', []):
                if isinstance(exc_info[1], expected):
                    raise

            # Tell the Registry to save its StackedObjectProxies current state
            # for later restoration
            registry.restorer.save_registry_state(environ)

            count = get_debug_count(environ)
            view_uri = self.make_view_url(environ, base_path, count)
            if not started:
                headers = [('content-type', 'text/html')]
                headers.append(('X-Debug-URL', view_uri))
                start_response('500 Internal Server Error',
                               headers,
                               exc_info)
            environ['wsgi.errors'].write('Debug at: %s\n' % view_uri)

            exc_data = collector.collect_exception(*exc_info)
            debug_info = DebugInfo(count, exc_info, exc_data, base_path,
                                   environ, view_uri)
            assert count not in self.debug_infos
            self.debug_infos[count] = debug_info

            if self.xmlhttp_key:
                # XMLHttpRequest caller: return a compact, script-free
                # traceback instead of the full interactive page.
                get_vars = wsgilib.parse_querystring(environ)
                if dict(get_vars).get(self.xmlhttp_key):
                    exc_data = collector.collect_exception(*exc_info)
                    html = formatter.format_html(
                        exc_data, include_hidden_frames=False,
                        include_reusable=False, show_extra_data=False)
                    return [html]

            # @@: it would be nice to deal with bad content types here
            return debug_info.content()

    def exception_handler(self, exc_info, environ):
        # Log/format an exception without the interactive machinery;
        # used as a plain error handler.
        simple_html_error = False
        if self.xmlhttp_key:
            get_vars = wsgilib.parse_querystring(environ)
            if dict(get_vars).get(self.xmlhttp_key):
                simple_html_error = True
        return errormiddleware.handle_exception(
            exc_info, environ['wsgi.errors'],
            html=True,
            debug_mode=True,
            simple_html_error=simple_html_error)
+
class DebugInfo(object):
    """
    Holds one collected exception (its traceback frames plus formatted
    exception data) so it can be re-viewed and interactively inspected
    later through the ``/_debug/`` URLs.
    """

    def __init__(self, counter, exc_info, exc_data, base_path,
                 environ, view_uri):
        self.counter = counter
        self.exc_data = exc_data
        self.base_path = base_path
        self.environ = environ
        self.view_uri = view_uri
        self.created = time.time()
        self.exc_type, self.exc_value, self.tb = exc_info
        # Marker looked for in frame locals below so we stop if the
        # traceback re-enters this formatter itself.
        __exception_formatter__ = 1
        self.frames = []
        n = 0
        tb = self.tb
        # Walk the traceback, keeping at most ``limit`` frames.
        while tb is not None and (limit is None or n < limit):
            if tb.tb_frame.f_locals.get('__exception_formatter__'):
                # Stop recursion. @@: should make a fake ExceptionFrame
                break
            self.frames.append(tb)
            tb = tb.tb_next
            n += 1

    def json(self):
        """Return the JSON-able representation of this object"""
        return {
            'uri': self.view_uri,
            'created': time.strftime('%c', time.gmtime(self.created)),
            'created_timestamp': self.created,
            'exception_type': str(self.exc_type),
            'exception': str(self.exc_value),
            }

    def frame(self, tbid):
        # Look up a cached traceback object by its id(); raises
        # ValueError when it is not one of the collected frames.
        for frame in self.frames:
            if id(frame) == tbid:
                return frame
        else:
            raise ValueError, (
                "No frame by id %s found from %r" % (tbid, self.frames))

    def wsgi_application(self, environ, start_response):
        # Serve the full debugging page for this exception.
        start_response('200 OK', [('content-type', 'text/html')])
        return self.content()

    def content(self):
        # Assemble the complete HTML page (as a one-item body list).
        html = format_eval_html(self.exc_data, self.base_path, self.counter)
        head_html = (formatter.error_css + formatter.hide_display_js)
        head_html += self.eval_javascript()
        repost_button = make_repost_button(self.environ)
        page = error_template % {
            'repost_button': repost_button or '',
            'head_html': head_html,
            'body': html}
        return [page]

    def eval_javascript(self):
        # Script tags that load MochiKit and debug.js and define the
        # ``debug_base``/``debug_count`` globals they rely on.
        base_path = self.base_path + '/_debug'
        return (
            '<script type="text/javascript" src="%s/media/MochiKit.packed.js">'
            '</script>\n'
            '<script type="text/javascript" src="%s/media/debug.js">'
            '</script>\n'
            '<script type="text/javascript">\n'
            'debug_base = %r;\n'
            'debug_count = %r;\n'
            '</script>\n'
            % (base_path, base_path, base_path, self.counter))
+
class EvalHTMLFormatter(formatter.HTMLFormatter):
    """
    HTML traceback formatter that appends a clickable expand icon to
    each source line, wired to the showFrame() Javascript handler.
    """

    def __init__(self, base_path, counter, **kw):
        super(EvalHTMLFormatter, self).__init__(**kw)
        # base_path/counter identify the debugged request so the links
        # call back into the correct cached DebugInfo.
        self.base_path = base_path
        self.counter = counter

    def format_source_line(self, filename, frame):
        line = formatter.HTMLFormatter.format_source_line(
            self, filename, frame)
        return (line +
                ' <a href="#" class="switch_source" '
                'tbid="%s" onClick="return showFrame(this)">&nbsp; &nbsp; '
                '<img src="%s/_debug/media/plus.jpg" border=0 width=9 '
                'height=9> &nbsp; &nbsp;</a>'
                % (frame.tbid, self.base_path))
+
def make_table(items):
    """
    Render a dict (or sequence of ``(name, value)`` pairs) as an HTML
    table, pretty-printing and HTML-quoting each value and truncating
    long values behind an expandable "..." link.
    """
    if isinstance(items, dict):
        items = items.items()
        items.sort()
    rows = []
    i = 0
    for name, value in items:
        i += 1
        out = StringIO()
        try:
            pprint.pprint(value, out)
        except Exception, e:
            # repr() of an arbitrary object can itself raise; show the
            # error rather than breaking the whole table.
            print >> out, 'Error: %s' % e
        value = html_quote(out.getvalue())
        if len(value) > 100:
            # @@: This can actually break the HTML :(
            # should I truncate before quoting?
            orig_value = value
            value = value[:100]
            value += '<a class="switch_source" style="background-color: #999" href="#" onclick="return expandLong(this)">...</a>'
            value += '<span style="display: none">%s</span>' % orig_value[100:]
        value = formatter.make_wrappable(value)
        # Alternate row classes for striping.
        if i % 2:
            attr = ' class="even"'
        else:
            attr = ' class="odd"'
        rows.append('<tr%s style="vertical-align: top;"><td>'
                    '<b>%s</b></td><td style="overflow: auto">%s<td></tr>'
                    % (attr, html_quote(name),
                       preserve_whitespace(value, quote=False)))
    return '<table>%s</table>' % (
        '\n'.join(rows))
+
def format_eval_html(exc_data, base_path, counter):
    """
    Format the collected exception data as the main HTML body: the
    short traceback, an optional expandable full traceback (only when
    it differs from the short one), and a hidden plain-text version.
    """
    short_formatter = EvalHTMLFormatter(
        base_path=base_path,
        counter=counter,
        include_reusable=False)
    short_er = short_formatter.format_collected_data(exc_data)
    # Second pass with hidden frames included for the "full traceback"
    # view.
    long_formatter = EvalHTMLFormatter(
        base_path=base_path,
        counter=counter,
        show_hidden_frames=True,
        show_extra_data=False,
        include_reusable=False)
    long_er = long_formatter.format_collected_data(exc_data)
    text_er = formatter.format_text(exc_data, show_hidden_frames=True)
    if short_formatter.filter_frames(exc_data.frames) != \
        long_formatter.filter_frames(exc_data.frames):
        # Only display the full traceback when it differs from the
        # short version
        full_traceback_html = """
        <br>
        <script type="text/javascript">
        show_button('full_traceback', 'full traceback')
        </script>
        <div id="full_traceback" class="hidden-data">
        %s
        </div>
        """ % long_er
    else:
        full_traceback_html = ''

    return """
    %s
    %s
    <br>
    <script type="text/javascript">
    show_button('text_version', 'text version')
    </script>
    <div id="text_version" class="hidden-data">
    <textarea style="width: 100%%" rows=10 cols=60>%s</textarea>
    </div>
    """ % (short_er, full_traceback_html, cgi.escape(text_er))
+
def make_repost_button(environ):
    """
    Return an HTML button that re-issues the original request, or None
    when that is not possible (non-GET requests: the body is gone).
    """
    url = request.construct_url(environ)
    if environ['REQUEST_METHOD'] == 'GET':
        return ('<button onclick="window.location.href=%r">'
                'Re-GET Page</button><br>' % url)
    else:
        # @@: I'd like to reconstruct this, but I can't because
        # the POST body is probably lost at this point, and
        # I can't get it back :(
        return None
    # @@: Use or lose the following code block
    # NOTE: the string below is unreachable dead code kept by the
    # author as a sketch of a future re-POST implementation.
    """
    fields = []
    for name, value in wsgilib.parse_formvars(
        environ, include_get_vars=False).items():
        if hasattr(value, 'filename'):
            # @@: Arg, we'll just submit the body, and leave out
            # the filename :(
            value = value.value
        fields.append(
            '<input type="hidden" name="%s" value="%s">'
            % (html_quote(name), html_quote(value)))
    return '''
<form action="%s" method="POST">
%s
<input type="submit" value="Re-POST Page">
</form>''' % (url, '\n'.join(fields))
"""
+
+
def input_form(tbid, debug_info):
    """
    Return the HTML form used to type and execute expressions in the
    context of traceback frame ``tbid`` (``debug_info`` is accepted for
    interface compatibility but not used here).
    """
    template = '''
<form action="#" method="POST"
 onsubmit="return submitInput($(\'submit_%(tbid)s\'), %(tbid)s)">
<div id="exec-output-%(tbid)s" style="width: 95%%;
 padding: 5px; margin: 5px; border: 2px solid #000;
 display: none"></div>
<input type="text" name="input" id="debug_input_%(tbid)s"
 style="width: 100%%"
 autocomplete="off" onkeypress="upArrow(this, event)"><br>
<input type="submit" value="Execute" name="submitbutton"
 onclick="return submitInput(this, %(tbid)s)"
 id="submit_%(tbid)s"
 input-from="debug_input_%(tbid)s"
 output-to="exec-output-%(tbid)s">
<input type="submit" value="Expand"
 onclick="return expandInput(this)">
</form>
 '''
    return template % {'tbid': tbid}
+
# Page shell for the debugger: an initially-hidden error area (used by
# the Javascript showError()/clearError() helpers), the optional
# re-GET/re-POST button, and the formatted traceback body.
error_template = '''
<html>
<head>
 <title>Server Error</title>
 %(head_html)s
</head>
<body>

<div id="error-area" style="display: none; background-color: #600; color: #fff; border: 2px solid black">
<div id="error-container"></div>
<button onclick="return clearError()">clear this</button>
</div>

%(repost_button)s

%(body)s

</body>
</html>
'''
+
def make_eval_exception(app, global_conf, xmlhttp_key=None):
    """
    Wrap ``app`` in the interactive EvalException debugger.

    This debugger is a major security hole, and should only be
    used during development.

    ``xmlhttp_key`` is a string that, if present in QUERY_STRING,
    indicates that the request is an XMLHttp request, and the
    Javascript/interactive debugger should not be returned.  (If you
    try to put the debugger somewhere with innerHTML, you will often
    crash the browser.)  When not given it is read from
    ``global_conf``, defaulting to ``'_'``.
    """
    key = (xmlhttp_key if xmlhttp_key is not None
           else global_conf.get('xmlhttp_key', '_'))
    return EvalException(app, xmlhttp_key=key)
diff --git a/paste/exceptions/__init__.py b/paste/exceptions/__init__.py
new file mode 100644
index 0000000..813f855
--- /dev/null
+++ b/paste/exceptions/__init__.py
@@ -0,0 +1,6 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Package for catching exceptions and displaying annotated exception
+reports
+"""
diff --git a/paste/exceptions/collector.py b/paste/exceptions/collector.py
new file mode 100644
index 0000000..65f9ec0
--- /dev/null
+++ b/paste/exceptions/collector.py
@@ -0,0 +1,526 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+## Originally zExceptions.ExceptionFormatter from Zope;
+## Modified by Ian Bicking, Imaginary Landscape, 2005
+"""
+An exception collector that finds traceback information plus
+supplements
+"""
+
+import sys
+import traceback
+import time
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+import linecache
+from paste.exceptions import serial_number_generator
+import warnings
+
# If true, an exception raised while evaluating a
# __traceback_supplement__ is itself formatted and stored on the frame
# (as supplement_exception) instead of being silently swallowed.
DEBUG_EXCEPTION_FORMATTER = True
# Prefix for the generated exception identification codes.
DEBUG_IDENT_PREFIX = 'E-'
# Encoding used by ExceptionCollector.safeStr when plain str() fails.
FALLBACK_ENCODING = 'UTF-8'

__all__ = ['collect_exception', 'ExceptionCollector']
+
class ExceptionCollector(object):

    """
    Produces a data structure that can be used by formatters to
    display exception reports.

    Magic variables:

    If you define one of these variables in your local scope, you can
    add information to tracebacks that happen in that context.  This
    allows applications to add all sorts of extra information about
    the context of the error, including URLs, environmental variables,
    users, hostnames, etc.  These are the variables we look for:

    ``__traceback_supplement__``:
        You can define this locally or globally (unlike all the other
        variables, which must be defined locally).

        ``__traceback_supplement__`` is a tuple of ``(factory, arg1,
        arg2...)``.  When there is an exception, ``factory(arg1, arg2,
        ...)`` is called, and the resulting object is inspected for
        supplemental information.

    ``__traceback_info__``:
        This information is added to the traceback, usually fairly
        literally.

    ``__traceback_hide__``:
        If set and true, this indicates that the frame should be
        hidden from abbreviated tracebacks.  This way you can hide
        some of the complexity of the larger framework and let the
        user focus on their own errors.

        By setting it to ``'before'``, all frames before this one will
        be thrown away.  By setting it to ``'after'`` then all frames
        after this will be thrown away until ``'reset'`` is found.  In
        each case the frame where it is set is included, unless you
        append ``'_and_this'`` to the value (e.g.,
        ``'before_and_this'``).

        Note that formatters will ignore this entirely if the frame
        that contains the error wouldn't normally be shown according
        to these rules.

    ``__traceback_reporter__``:
        This should be a reporter object (see the reporter module),
        or a list/tuple of reporter objects.  All reporters found this
        way will be given the exception, innermost first.

    ``__traceback_decorator__``:
        This object (defined in a local or global scope) will get the
        result of this function (the CollectedException defined
        below).  It may modify this object in place, or return an
        entirely new object.  This gives the object the ability to
        manipulate the traceback arbitrarily.

    The actual interpretation of these values is largely up to the
    reporters and formatters.

    ``collect_exception(*sys.exc_info())`` will return an object with
    several attributes:

    ``frames``:
        A list of frames
    ``exception_formatted``:
        The formatted exception, generally a full traceback
    ``exception_type``:
        The type of the exception, like ``ValueError``
    ``exception_value``:
        The string value of the exception, like ``'x not in list'``
    ``identification_code``:
        A hash of the exception data meant to identify the general
        exception, so that it shares this code with other exceptions
        that derive from the same problem.  The code is a hash of
        all the module names and function names in the traceback,
        plus exception_type.  This should be shown to users so they
        can refer to the exception later. (@@: should it include a
        portion that allows identification of the specific instance
        of the exception as well?)

    The list of frames goes innermost first.  Each frame has these
    attributes; some values may be None if they could not be
    determined.

    ``modname``:
        the name of the module
    ``filename``:
        the filename of the module
    ``lineno``:
        the line of the error
    ``revision``:
        the contents of __version__ or __revision__
    ``name``:
        the function name
    ``supplement``:
        an object created from ``__traceback_supplement__``
    ``supplement_exception``:
        a simple traceback of any exception ``__traceback_supplement__``
        created
    ``traceback_info``:
        the str() of any ``__traceback_info__`` variable found in the local
        scope (@@: should it str()-ify it or not?)
    ``traceback_hide``:
        the value of any ``__traceback_hide__`` variable
    ``traceback_log``:
        the value of any ``__traceback_log__`` variable


    ``__traceback_supplement__`` is thrown away, but a fixed
    set of attributes are captured; each of these attributes is
    optional.

    ``object``:
        the name of the object being visited
    ``source_url``:
        the original URL requested
    ``line``:
        the line of source being executed (for interpreters, like ZPT)
    ``column``:
        the column of source being executed
    ``expression``:
        the expression being evaluated (also for interpreters)
    ``warnings``:
        a list of (string) warnings to be displayed
    ``getInfo``:
        a function/method that takes no arguments, and returns a string
        describing any extra information
    ``extraData``:
        a function/method that takes no arguments, and returns a
        dictionary.  The contents of this dictionary will not be
        displayed in the context of the traceback, but globally for
        the exception.  Results will be grouped by the keys in the
        dictionaries (which also serve as titles).  The keys can also
        be tuples of (importance, title); in this case the importance
        should be ``important`` (shows up at top), ``normal`` (shows
        up somewhere; unspecified), ``supplemental`` (shows up at
        bottom), or ``extra`` (shows up hidden or not at all).

    These are used to create an object with attributes of the same
    names (``getInfo`` becomes a string attribute, not a method).
    ``__traceback_supplement__`` implementations should be careful to
    produce values that are relatively static and unlikely to cause
    further errors in the reporting system -- any complex
    introspection should go in ``getInfo()`` and should ultimately
    return a string.

    Note that all attributes are optional, and under certain
    circumstances may be None or may not exist at all -- the collector
    can only do a best effort, but must avoid creating any exceptions
    itself.

    Formatters may want to use ``__traceback_hide__`` as a hint to
    hide frames that are part of the 'framework' or underlying system.
    There are a variety of rules about special values for this
    variables that formatters should be aware of.

    TODO:

    More attributes in __traceback_supplement__?  Maybe an attribute
    that gives a list of local variables that should also be
    collected?  Also, attributes that would be explicitly meant for
    the entire request, not just a single frame.  Right now some of
    the fixed set of attributes (e.g., source_url) are meant for this
    use, but there's no explicit way for the supplement to indicate
    new values, e.g., logged-in user, HTTP referrer, environment, etc.
    Also, the attributes that do exist are Zope/Web oriented.

    More information on frames?  cgitb, for instance, produces
    extensive information on local variables.  There exists the
    possibility that getting this information may cause side effects,
    which can make debugging more difficult; but it also provides
    fodder for post-mortem debugging.  However, the collector is not
    meant to be configurable, but to capture everything it can and let
    the formatters be configurable.  Maybe this would have to be a
    configuration value, or maybe it could be indicated by another
    magical variable (which would probably mean 'show all local
    variables below this frame')
    """

    # When true, getRevision() captures __version__/__revision__ from
    # each frame's globals; disabled by default.
    show_revisions = 0

    def __init__(self, limit=None):
        # Maximum number of frames to collect; None means fall back on
        # sys.tracebacklimit, or collect everything.
        self.limit = limit

    def getLimit(self):
        """Return the effective frame limit, or None for no limit."""
        limit = self.limit
        if limit is None:
            limit = getattr(sys, 'tracebacklimit', None)
        return limit

    def getRevision(self, globals):
        """Return a cleaned-up revision string from a frame's globals,
        or None when disabled or unavailable.  Never raises."""
        if not self.show_revisions:
            return None
        revision = globals.get('__revision__', None)
        if revision is None:
            # Incorrect but commonly used spelling
            revision = globals.get('__version__', None)

        if revision is not None:
            try:
                revision = str(revision).strip()
            except:
                # Best effort only; a broken __str__ must not abort
                # exception collection.
                revision = '???'
        return revision

    def collectSupplement(self, supplement, tb):
        """Capture the fixed set of optional attributes from a
        ``__traceback_supplement__`` object into a SupplementaryData
        instance (see the class docstring for the attribute list)."""
        result = {}

        for name in ('object', 'source_url', 'line', 'column',
                     'expression', 'warnings'):
            result[name] = getattr(supplement, name, None)

        func = getattr(supplement, 'getInfo', None)
        if func:
            result['info'] = func()
        else:
            result['info'] = None
        func = getattr(supplement, 'extraData', None)
        if func:
            result['extra'] = func()
        else:
            result['extra'] = None
        return SupplementaryData(**result)

    def collectLine(self, tb, extra_data):
        """Collect the data for one traceback frame.

        Returns a dict of keyword arguments for ExceptionFrame;
        supplement ``extraData()`` results are merged into the
        ``extra_data`` dict passed in by collectException().
        """
        f = tb.tb_frame
        lineno = tb.tb_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        globals = f.f_globals
        locals = f.f_locals
        # Was hasattr(locals, 'has_key'), which is both deprecated and
        # false for every dict on Python 3; test for a real dict
        # directly instead.
        if not isinstance(locals, dict):
            # Something weird about this frame; it's not a real dict
            warnings.warn(
                "Frame %s has an invalid locals(): %r" % (
                globals.get('__name__', 'unknown'), locals))
            locals = {}
        data = {}
        data['modname'] = globals.get('__name__', None)
        data['filename'] = filename
        data['lineno'] = lineno
        data['revision'] = self.getRevision(globals)
        data['name'] = name
        data['tbid'] = id(tb)

        # Output a traceback supplement, if any.
        # (``in`` replaces the deprecated dict.has_key().)
        if '__traceback_supplement__' in locals:
            # Use the supplement defined in the function.
            tbs = locals['__traceback_supplement__']
        elif '__traceback_supplement__' in globals:
            # Use the supplement defined in the module.
            # This is used by Scripts (Python).
            tbs = globals['__traceback_supplement__']
        else:
            tbs = None
        if tbs is not None:
            factory = tbs[0]
            args = tbs[1:]
            try:
                supp = factory(*args)
                data['supplement'] = self.collectSupplement(supp, tb)
                if data['supplement'].extra:
                    for key, value in data['supplement'].extra.items():
                        extra_data.setdefault(key, []).append(value)
            except:
                if DEBUG_EXCEPTION_FORMATTER:
                    out = StringIO()
                    traceback.print_exc(file=out)
                    text = out.getvalue()
                    data['supplement_exception'] = text
                # else just swallow the exception.

        try:
            tbi = locals.get('__traceback_info__', None)
            if tbi is not None:
                data['traceback_info'] = str(tbi)
        except:
            # str() of arbitrary user data may fail; skip it.
            pass

        marker = []
        for name in ('__traceback_hide__', '__traceback_log__',
                     '__traceback_decorator__'):
            try:
                tbh = locals.get(name, globals.get(name, marker))
                if tbh is not marker:
                    # Store as e.g. data['traceback_hide'].
                    data[name[2:-2]] = tbh
            except:
                pass

        return data

    def collectExceptionOnly(self, etype, value):
        """Format just the exception line(s), like the last line of a
        normal traceback; returns a list of strings."""
        return traceback.format_exception_only(etype, value)

    def collectException(self, etype, value, tb, limit=None):
        """Walk the traceback and build a CollectedException.

        Applies any ``__traceback_decorator__`` objects found along
        the way; decorator errors are deliberately ignored so the
        collector itself never raises.
        """
        # The next line provides a way to detect recursion.
        __exception_formatter__ = 1
        frames = []
        ident_data = []
        traceback_decorators = []
        if limit is None:
            limit = self.getLimit()
        n = 0
        extra_data = {}
        while tb is not None and (limit is None or n < limit):
            if tb.tb_frame.f_locals.get('__exception_formatter__'):
                # Stop recursion. @@: should make a fake ExceptionFrame
                frames.append('(Recursive formatException() stopped)\n')
                break
            data = self.collectLine(tb, extra_data)
            frame = ExceptionFrame(**data)
            frames.append(frame)
            if frame.traceback_decorator is not None:
                traceback_decorators.append(frame.traceback_decorator)
            ident_data.append(frame.modname or '?')
            ident_data.append(frame.name or '?')
            tb = tb.tb_next
            n = n + 1
        ident_data.append(str(etype))
        # Stable hash of module/function names + exception type, used
        # to group occurrences of the same underlying problem.
        ident = serial_number_generator.hash_identifier(
            ' '.join(ident_data), length=5, upper=True,
            prefix=DEBUG_IDENT_PREFIX)

        result = CollectedException(
            frames=frames,
            exception_formatted=self.collectExceptionOnly(etype, value),
            exception_type=etype,
            exception_value=self.safeStr(value),
            identification_code=ident,
            date=time.localtime(),
            extra_data=extra_data)
        if etype is ImportError:
            extra_data[('important', 'sys.path')] = [sys.path]
        for decorator in traceback_decorators:
            try:
                new_result = decorator(result)
                if new_result is not None:
                    result = new_result
            except:
                pass
        return result

    def safeStr(self, obj):
        """str() that never raises UnicodeEncodeError (Python 2:
        falls back on unicode() with FALLBACK_ENCODING, then repr())."""
        try:
            return str(obj)
        except UnicodeEncodeError:
            try:
                return unicode(obj).encode(FALLBACK_ENCODING, 'replace')
            except UnicodeEncodeError:
                # This is when something is really messed up, but this can
                # happen when the __str__ of an object has to handle unicode
                return repr(obj)
+
# Default cap on the number of frames collected; possibly reduced
# below to honor sys.tracebacklimit.
limit = 200
+
+class Bunch(object):
+
+ """
+ A generic container
+ """
+
+ def __init__(self, **attrs):
+ for name, value in attrs.items():
+ setattr(self, name, value)
+
+ def __repr__(self):
+ name = '<%s ' % self.__class__.__name__
+ name += ' '.join(['%s=%r' % (name, str(value)[:30])
+ for name, value in self.__dict__.items()
+ if not name.startswith('_')])
+ return name + '>'
+
class CollectedException(Bunch):
    """
    This is the result of collecting the exception; it contains copies
    of data of interest.
    """
    # A list of frames (ExceptionFrame instances), innermost last:
    frames = []
    # The result of traceback.format_exception_only; this looks
    # like a normal traceback you'd see in the interactive interpreter
    exception_formatted = None
    # The *string* representation of the type of the exception
    # (@@: should we give the # actual class? -- we can't keep the
    # actual exception around, but the class should be safe)
    # Something like 'ValueError'
    exception_type = None
    # The string representation of the exception, from ``str(e)``.
    exception_value = None
    # An identifier which should more-or-less classify this particular
    # exception, including where in the code it happened.
    identification_code = None
    # The date, as time.localtime() returns:
    date = None
    # A dictionary of supplemental data:
    extra_data = {}
+
class SupplementaryData(Bunch):
    """
    The result of __traceback_supplement__.  We don't keep the
    supplement object around, for fear of GC problems and whatnot.
    (@@: Maybe I'm being too superstitious about copying only specific
    information over)
    """

    # These attributes are copied from the object, or left as None
    # if the object doesn't have these attributes:
    object = None
    source_url = None
    line = None
    column = None
    expression = None
    warnings = None
    # This is the *return value* of supplement.getInfo():
    info = None
+
class ExceptionFrame(Bunch):
    """
    This represents one frame of the exception.  Each frame is a
    context in the call stack, typically represented by a line
    number and module name in the traceback.
    """

    # The name of the module; can be None, especially when the code
    # isn't associated with a module.
    modname = None
    # The filename (@@: when no filename, is it None or '?'?)
    filename = None
    # Line number
    lineno = None
    # The value of __revision__ or __version__ -- but only if
    # show_revision = True (by default it is false).  (@@: Why not
    # collect this?)
    revision = None
    # The name of the function with the error (@@: None or '?' when
    # unknown?)
    name = None
    # A SupplementaryData object, if __traceback_supplement__ was found
    # (and produced no errors)
    supplement = None
    # If accessing __traceback_supplement__ causes any error, the
    # plain-text traceback is stored here
    supplement_exception = None
    # The str() of any __traceback_info__ value found
    traceback_info = None
    # The value of __traceback_hide__
    traceback_hide = False
    # The value of __traceback_decorator__
    traceback_decorator = None
    # The id() of the traceback scope, can be used to reference the
    # scope for use elsewhere
    tbid = None

    def get_source_line(self, context=0):
        """
        Return the source of the current line of this frame.  You
        probably want to .strip() it as well, as it is likely to have
        leading whitespace.

        If context is given, then that many lines on either side will
        also be returned.  E.g., context=1 will give 3 lines.
        """
        if not self.filename or not self.lineno:
            return None
        lines = []
        for lineno in range(self.lineno-context, self.lineno+context+1):
            lines.append(linecache.getline(self.filename, lineno))
        return ''.join(lines)
+
# Honor an interpreter-wide traceback limit, if one has been set.
if hasattr(sys, 'tracebacklimit'):
    limit = min(limit, sys.tracebacklimit)

# Shared module-level collector used by collect_exception() below.
col = ExceptionCollector()
+
def collect_exception(t, v, tb, limit=None):
    """
    Collect an exception from ``sys.exc_info()``.

    Use like::

      try:
          blah blah
      except:
          exc_data = collect_exception(*sys.exc_info())
    """
    return col.collectException(t, v, tb, limit=limit)
diff --git a/paste/exceptions/errormiddleware.py b/paste/exceptions/errormiddleware.py
new file mode 100644
index 0000000..784414f
--- /dev/null
+++ b/paste/exceptions/errormiddleware.py
@@ -0,0 +1,460 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Error handler middleware
+"""
+import sys
+import traceback
+import cgi
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+from paste.exceptions import formatter, collector, reporter
+from paste import wsgilib
+from paste import request
+
+__all__ = ['ErrorMiddleware', 'handle_exception']
+
class _NoDefault(object):
    # Sentinel type: distinguishes "argument not supplied" from an
    # explicit None/False in ErrorMiddleware's keyword arguments.
    def __repr__(self):
        return '<NoDefault>'
# Singleton sentinel instance used as a default value below.
NoDefault = _NoDefault()
+
class ErrorMiddleware(object):

    """
    Error handling middleware

    Usage::

        error_catching_wsgi_app = ErrorMiddleware(wsgi_app)

    Settings:

      ``debug``:
          If true, then tracebacks will be shown in the browser.

      ``error_email``:
          an email address (or list of addresses) to send exception
          reports to

      ``error_log``:
          a filename to append tracebacks to

      ``show_exceptions_in_wsgi_errors``:
          If true, then errors will be printed to ``wsgi.errors``
          (frequently a server error log, or stderr).

      ``from_address``, ``smtp_server``, ``error_subject_prefix``, ``smtp_username``, ``smtp_password``, ``smtp_use_tls``:
          variables to control the emailed exception reports

      ``error_message``:
          When debug mode is off, the error message to show to users.

      ``xmlhttp_key``:
          When this key (default ``_``) is in the request GET variables
          (not POST!), expect that this is an XMLHttpRequest, and the
          response should be more minimal; it should not be a complete
          HTML page.

    Environment Configuration:

      ``paste.throw_errors``:
          If this setting in the request environment is true, then this
          middleware is disabled.  This can be useful in a testing situation
          where you don't want errors to be caught and transformed.

      ``paste.expected_exceptions``:
          When this middleware encounters an exception listed in this
          environment variable and when the ``start_response`` has not
          yet occurred, the exception will be re-raised instead of being
          caught.  This should generally be set by middleware that may
          (but probably shouldn't be) installed above this middleware,
          and wants to get certain exceptions.  Exceptions raised after
          ``start_response`` have been called are always caught since
          by definition they are no longer expected.

    """

    def __init__(self, application, global_conf=None,
                 debug=NoDefault,
                 error_email=None,
                 error_log=None,
                 show_exceptions_in_wsgi_errors=NoDefault,
                 from_address=None,
                 smtp_server=None,
                 smtp_username=None,
                 smtp_password=None,
                 smtp_use_tls=False,
                 error_subject_prefix=None,
                 error_message=None,
                 xmlhttp_key=None):
        from paste.util import converters
        self.application = application
        # @@: global_conf should be handled elsewhere in a separate
        # function for the entry point
        if global_conf is None:
            global_conf = {}
        # Explicit keyword arguments win; NoDefault means "fall back
        # on the global configuration".
        if debug is NoDefault:
            debug = converters.asbool(global_conf.get('debug'))
        if show_exceptions_in_wsgi_errors is NoDefault:
            show_exceptions_in_wsgi_errors = converters.asbool(global_conf.get('show_exceptions_in_wsgi_errors'))
        self.debug_mode = converters.asbool(debug)
        if error_email is None:
            # Try the conventional admin-address settings in turn.
            error_email = (global_conf.get('error_email')
                           or global_conf.get('admin_email')
                           or global_conf.get('webmaster_email')
                           or global_conf.get('sysadmin_email'))
        self.error_email = converters.aslist(error_email)
        self.error_log = error_log
        self.show_exceptions_in_wsgi_errors = show_exceptions_in_wsgi_errors
        if from_address is None:
            from_address = global_conf.get('error_from_address', 'errors@localhost')
        self.from_address = from_address
        if smtp_server is None:
            smtp_server = global_conf.get('smtp_server', 'localhost')
        self.smtp_server = smtp_server
        self.smtp_username = smtp_username or global_conf.get('smtp_username')
        self.smtp_password = smtp_password or global_conf.get('smtp_password')
        self.smtp_use_tls = smtp_use_tls or converters.asbool(global_conf.get('smtp_use_tls'))
        self.error_subject_prefix = error_subject_prefix or ''
        if error_message is None:
            error_message = global_conf.get('error_message')
        self.error_message = error_message
        if xmlhttp_key is None:
            xmlhttp_key = global_conf.get('xmlhttp_key', '_')
        self.xmlhttp_key = xmlhttp_key

    def __call__(self, environ, start_response):
        """
        The WSGI application interface.
        """
        # We want to be careful about not sending headers twice,
        # and the content type that the app has committed to (if there
        # is an exception in the iterator body of the response)
        if environ.get('paste.throw_errors'):
            return self.application(environ, start_response)
        # Mark the request so nested ErrorMiddleware instances don't
        # also try to catch the same exceptions.
        environ['paste.throw_errors'] = True

        try:
            __traceback_supplement__ = Supplement, self, environ
            sr_checker = ResponseStartChecker(start_response)
            app_iter = self.application(environ, sr_checker)
            return self.make_catching_iter(app_iter, environ, sr_checker)
        except:
            exc_info = sys.exc_info()
            try:
                for expect in environ.get('paste.expected_exceptions', []):
                    if isinstance(exc_info[1], expect):
                        raise
                # Passing exc_info lets the server replace headers if
                # the response has already started (per PEP 333).
                start_response('500 Internal Server Error',
                               [('content-type', 'text/html')],
                               exc_info)
                # @@: it would be nice to deal with bad content types here
                response = self.exception_handler(exc_info, environ)
                return [response]
            finally:
                # clean up locals...
                exc_info = None

    def make_catching_iter(self, app_iter, environ, sr_checker):
        """Wrap the application's response iterable so exceptions
        raised during iteration/close are also caught."""
        if isinstance(app_iter, (list, tuple)):
            # These don't raise
            return app_iter
        return CatchingIter(app_iter, environ, sr_checker, self)

    def exception_handler(self, exc_info, environ):
        """Report the exception and return the error-page body
        (minimal when the request looks like an XMLHttpRequest)."""
        simple_html_error = False
        if self.xmlhttp_key:
            get_vars = wsgilib.parse_querystring(environ)
            if dict(get_vars).get(self.xmlhttp_key):
                simple_html_error = True
        return handle_exception(
            exc_info, environ['wsgi.errors'],
            html=True,
            debug_mode=self.debug_mode,
            error_email=self.error_email,
            error_log=self.error_log,
            show_exceptions_in_wsgi_errors=self.show_exceptions_in_wsgi_errors,
            error_email_from=self.from_address,
            smtp_server=self.smtp_server,
            smtp_username=self.smtp_username,
            smtp_password=self.smtp_password,
            smtp_use_tls=self.smtp_use_tls,
            error_subject_prefix=self.error_subject_prefix,
            error_message=self.error_message,
            simple_html_error=simple_html_error)
+
class ResponseStartChecker(object):

    """
    Wraps a WSGI ``start_response`` callable and records whether it
    has been called, so error handlers can tell if the response has
    already started.
    """

    def __init__(self, start_response):
        self.start_response = start_response
        self.response_started = False

    def __call__(self, *args):
        # Flag first, then delegate: even a failing start_response
        # call counts as "response started".
        self.response_started = True
        self.start_response(*args)
+
class CatchingIter(object):

    """
    A wrapper around the application iterator that will catch
    exceptions raised by the application's generator, or by its close
    method, and display or report as necessary.
    """

    def __init__(self, app_iter, environ, start_checker, error_middleware):
        self.app_iterable = app_iter
        self.app_iterator = iter(app_iter)
        self.environ = environ
        self.start_checker = start_checker
        self.error_middleware = error_middleware
        # True once the underlying iterator is exhausted or has failed:
        self.closed = False

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol.
        __traceback_supplement__ = (
            Supplement, self.error_middleware, self.environ)
        if self.closed:
            raise StopIteration
        try:
            return self.app_iterator.next()
        except StopIteration:
            self.closed = True
            close_response = self._close()
            if close_response is not None:
                # close() itself failed; its error report becomes the
                # final chunk of the response body.
                return close_response
            else:
                raise StopIteration
        except:
            self.closed = True
            close_response = self._close()
            exc_info = sys.exc_info()
            response = self.error_middleware.exception_handler(
                exc_info, self.environ)
            if close_response is not None:
                response += (
                    '<hr noshade>Error in .close():<br>%s'
                    % close_response)

            if not self.start_checker.response_started:
                # The application died before calling start_response,
                # so start the error response ourselves (exc_info per
                # PEP 333).
                self.start_checker('500 Internal Server Error',
                                   [('content-type', 'text/html')],
                                   exc_info)

            return response

    def close(self):
        # This should at least print something to stderr if the
        # close method fails at this point
        if not self.closed:
            self._close()

    def _close(self):
        """Close and return any error message"""
        if not hasattr(self.app_iterable, 'close'):
            return None
        try:
            self.app_iterable.close()
            return None
        except:
            close_response = self.error_middleware.exception_handler(
                sys.exc_info(), self.environ)
            return close_response
+
+
class Supplement(object):

    """
    This is a supplement used to display standard WSGI information in
    the traceback.
    """

    def __init__(self, middleware, environ):
        self.middleware = middleware
        self.environ = environ
        # Full URL of the failing request, shown as the error source:
        self.source_url = request.construct_url(environ)

    def extraData(self):
        """Return extra report data grouped into CGI variables, WSGI
        variables, and (when present) the paste.config dictionary."""
        data = {}
        cgi_vars = data[('extra', 'CGI Variables')] = {}
        wsgi_vars = data[('extra', 'WSGI Variables')] = {}
        # Well-known WSGI keys that would only add noise to the report:
        hide_vars = ['paste.config', 'wsgi.errors', 'wsgi.input',
                     'wsgi.multithread', 'wsgi.multiprocess',
                     'wsgi.run_once', 'wsgi.version',
                     'wsgi.url_scheme']
        for name, value in self.environ.items():
            if name.upper() == name:
                # All-uppercase keys are CGI-style variables.
                if value:
                    cgi_vars[name] = value
            elif name not in hide_vars:
                wsgi_vars[name] = value
        if self.environ['wsgi.version'] != (1, 0):
            # Only show the version when it is not the expected (1, 0).
            wsgi_vars['wsgi.version'] = self.environ['wsgi.version']
        proc_desc = tuple([int(bool(self.environ[key]))
                           for key in ('wsgi.multiprocess',
                                       'wsgi.multithread',
                                       'wsgi.run_once')])
        wsgi_vars['wsgi process'] = self.process_combos[proc_desc]
        wsgi_vars['application'] = self.middleware.application
        if 'paste.config' in self.environ:
            data[('extra', 'Configuration')] = dict(self.environ['paste.config'])
        return data

    # Human-readable description for each (multiprocess, multithread,
    # run_once) combination:
    process_combos = {
        # multiprocess, multithread, run_once
        (0, 0, 0): 'Non-concurrent server',
        (0, 1, 0): 'Multithreaded',
        (1, 0, 0): 'Multiprocess',
        (1, 1, 0): 'Multi process AND threads (?)',
        (0, 0, 1): 'Non-concurrent CGI',
        (0, 1, 1): 'Multithread CGI (?)',
        (1, 0, 1): 'CGI',
        (1, 1, 1): 'Multi thread/process CGI (?)',
        }
+
def handle_exception(exc_info, error_stream, html=True,
                     debug_mode=False,
                     error_email=None,
                     error_log=None,
                     show_exceptions_in_wsgi_errors=False,
                     error_email_from='errors@localhost',
                     smtp_server='localhost',
                     smtp_username=None,
                     smtp_password=None,
                     smtp_use_tls=False,
                     error_subject_prefix='',
                     error_message=None,
                     simple_html_error=False,
                     ):
    """
    For exception handling outside of a web context

    Use like::

        import sys
        from paste.exceptions.errormiddleware import handle_exception
        try:
            do stuff
        except:
            handle_exception(
                sys.exc_info(), sys.stderr, html=False, ...other config...)

    If you want to report, but not fully catch the exception, call
    ``raise`` after ``handle_exception``, which (when given no argument)
    will reraise the exception.
    """
    # 'reported' records whether at least one reporter or formatter
    # successfully handled the exception; when none has by the end, a
    # plain-text report is written to error_stream as a last resort.
    reported = False
    exc_data = collector.collect_exception(*exc_info)
    # Accumulates descriptions of secondary failures (a reporter that
    # itself raised) so they are not silently lost:
    extra_data = ''
    if error_email:
        rep = reporter.EmailReporter(
            to_addresses=error_email,
            from_address=error_email_from,
            smtp_server=smtp_server,
            smtp_username=smtp_username,
            smtp_password=smtp_password,
            smtp_use_tls=smtp_use_tls,
            subject_prefix=error_subject_prefix)
        rep_err = send_report(rep, exc_data, html=html)
        if rep_err:
            extra_data += rep_err
        else:
            reported = True
    if error_log:
        rep = reporter.LogReporter(
            filename=error_log)
        rep_err = send_report(rep, exc_data, html=html)
        if rep_err:
            extra_data += rep_err
        else:
            reported = True
    if show_exceptions_in_wsgi_errors:
        rep = reporter.FileReporter(
            file=error_stream)
        rep_err = send_report(rep, exc_data, html=html)
        if rep_err:
            extra_data += rep_err
        else:
            reported = True
    else:
        # Even when full tracebacks are suppressed, leave a one-line
        # note in the error stream.
        error_stream.write('Error - %s: %s\n' % (
            exc_data.exception_type, exc_data.exception_value))
    if html:
        if debug_mode and simple_html_error:
            # Minimal fragment for XMLHttpRequest-style responses.
            return_error = formatter.format_html(
                exc_data, include_hidden_frames=False,
                include_reusable=False, show_extra_data=False)
            reported = True
        elif debug_mode and not simple_html_error:
            # Full HTML error page with all frames.
            error_html = formatter.format_html(
                exc_data,
                include_hidden_frames=True,
                include_reusable=False)
            head_html = formatter.error_css + formatter.hide_display_js
            return_error = error_template(
                head_html, error_html, extra_data)
            extra_data = ''
            reported = True
        else:
            # Production mode: generic message only.
            msg = error_message or '''
            An error occurred. See the error logs for more information.
            (Turn debug on to display exception reports here)
            '''
            return_error = error_template('', msg, '')
    else:
        return_error = None
    if not reported and error_stream:
        # Fallback: plain-text report to the error stream.
        err_report = formatter.format_text(exc_data, show_hidden_frames=True)
        err_report += '\n' + '-'*60 + '\n'
        error_stream.write(err_report)
    if extra_data:
        error_stream.write(extra_data)
    return return_error
+
def send_report(rep, exc_data, html=True):
    """
    Ask the reporter ``rep`` to report the collected exception data.

    Returns ``''`` on success.  On failure it returns a description of
    the secondary error (HTML-formatted when ``html`` is true) instead
    of raising, so a broken reporter never masks the original
    exception.
    """
    try:
        rep.report(exc_data)
    except:
        buf = StringIO()
        traceback.print_exc(file=buf)
        if html:
            return """
        <p>Additionally an error occurred while sending the %s report:

        <pre>%s</pre>
        </p>""" % (
                cgi.escape(str(rep)), buf.getvalue())
        return (
            "Additionally an error occurred while sending the "
            "%s report:\n%s" % (str(rep), buf.getvalue()))
    return ''
+
def error_template(head_html, exception, extra):
    """
    Render a minimal "Server Error" HTML page.

    ``head_html`` is inserted into <head> (CSS/JS), ``exception`` is
    the formatted error body, and ``extra`` holds any additional
    report text appended after it.
    """
    page = '''
    <html>
    <head>
    <title>Server Error</title>
    %s
    </head>
    <body>
    <h1>Server Error</h1>
    %s
    %s
    </body>
    </html>'''
    return page % (head_html, exception, extra)
+
def make_error_middleware(app, global_conf, **kw):
    # Paste entry point; its docstring is assigned at import time from
    # the "Settings" section of ErrorMiddleware's docstring.
    return ErrorMiddleware(app, global_conf=global_conf, **kw)
+
# Copy the "Settings:" portion of ErrorMiddleware's docstring onto
# make_error_middleware so the entry point is self-documenting.
doc_lines = ErrorMiddleware.__doc__.splitlines(True)
for i in range(len(doc_lines)):
    if doc_lines[i].strip().startswith('Settings'):
        make_error_middleware.__doc__ = ''.join(doc_lines[i:])
        break
# Don't leave the loop variables behind as module attributes.
del i, doc_lines
diff --git a/paste/exceptions/formatter.py b/paste/exceptions/formatter.py
new file mode 100644
index 0000000..e1fadbe
--- /dev/null
+++ b/paste/exceptions/formatter.py
@@ -0,0 +1,564 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Formatters for the exception data that comes from ExceptionCollector.
+"""
+# @@: TODO:
+# Use this: http://www.zope.org/Members/tino/VisualTraceback/VisualTracebackNews
+
+import cgi
+import re
+from paste.util import PySourceColor
+
def html_quote(s):
    # Stringify, then HTML-escape; the second argument (quote=True) also
    # escapes double quotes, so the result is safe in attribute values.
    return cgi.escape(str(s), True)
+
class AbstractFormatter(object):
    """Base class for exception formatters.

    Drives the traversal of an ExceptionCollector result; subclasses
    (TextFormatter, HTMLFormatter) supply the ``quote*``/``format_*``
    rendering primitives.
    """

    # Preferred display order for "general data" items (see
    # format_collected_data); kept for subclass use.
    general_data_order = ['object', 'source_url']

    def __init__(self, show_hidden_frames=False,
                 include_reusable=True,
                 show_extra_data=True,
                 trim_source_paths=()):
        # show_hidden_frames: ignore __traceback_hide__-style markers
        # include_reusable: emit shared CSS/JS with the report (HTML only)
        # show_extra_data: include collected extra_data items
        # trim_source_paths: (prefix, replacement) pairs for file names
        self.show_hidden_frames = show_hidden_frames
        self.trim_source_paths = trim_source_paths
        self.include_reusable = include_reusable
        self.show_extra_data = show_extra_data

    def format_collected_data(self, exc_data):
        """Render one collected exception into a single string."""
        general_data = {}
        if self.show_extra_data:
            for name, value_list in exc_data.extra_data.items():
                if isinstance(name, tuple):
                    importance, title = name
                else:
                    importance, title = 'normal', name
                for value in value_list:
                    # NOTE(review): keyed by (importance, name) but formatted
                    # with ``title``, and each value overwrites the previous
                    # one -- only the last value per name survives.  Confirm
                    # whether that is intentional.
                    general_data[(importance, name)] = self.format_extra_data(
                        importance, title, value)
        lines = []
        frames = self.filter_frames(exc_data.frames)
        for frame in frames:
            # Per-frame "supplement" carries template/context info, if any.
            sup = frame.supplement
            if sup:
                if sup.object:
                    general_data[('important', 'object')] = self.format_sup_object(
                        sup.object)
                if sup.source_url:
                    general_data[('important', 'source_url')] = self.format_sup_url(
                        sup.source_url)
                if sup.line:
                    lines.append(self.format_sup_line_pos(sup.line, sup.column))
                if sup.expression:
                    lines.append(self.format_sup_expression(sup.expression))
                if sup.warnings:
                    for warning in sup.warnings:
                        lines.append(self.format_sup_warning(warning))
                if sup.info:
                    lines.extend(self.format_sup_info(sup.info))
            if frame.supplement_exception:
                lines.append('Exception in supplement:')
                lines.append(self.quote_long(frame.supplement_exception))
            if frame.traceback_info:
                lines.append(self.format_traceback_info(frame.traceback_info))
            filename = frame.filename
            if filename and self.trim_source_paths:
                # Shorten well-known path prefixes for readability.
                for path, repl in self.trim_source_paths:
                    if filename.startswith(path):
                        filename = repl + filename[len(path):]
                        break
            lines.append(self.format_source_line(filename or '?', frame))
            source = frame.get_source_line()
            long_source = frame.get_source_line(2)
            if source:
                lines.append(self.format_long_source(
                    source, long_source))
        etype = exc_data.exception_type
        # Python 2 ``basestring``; etype may be a class or a string.
        if not isinstance(etype, basestring):
            etype = etype.__name__
        exc_info = self.format_exception_info(
            etype,
            exc_data.exception_value)
        data_by_importance = {'important': [], 'normal': [],
                              'supplemental': [], 'extra': []}
        for (importance, name), value in general_data.items():
            data_by_importance[importance].append(
                (name, value))
        for value in data_by_importance.values():
            value.sort()
        return self.format_combine(data_by_importance, lines, exc_info)

    def filter_frames(self, frames):
        """
        Removes any frames that should be hidden, according to the
        values of traceback_hide, self.show_hidden_frames, and the
        hidden status of the final frame.
        """
        if self.show_hidden_frames:
            return frames
        new_frames = []
        hidden = False
        for frame in frames:
            hide = frame.traceback_hide
            # @@: It would be nice to signal a warning if an unknown
            # hide string was used, but I'm not sure where to put
            # that warning.
            if hide == 'before':
                # Drop everything collected so far.
                new_frames = []
                hidden = False
            elif hide == 'before_and_this':
                new_frames = []
                hidden = False
                continue
            elif hide == 'reset':
                hidden = False
            elif hide == 'reset_and_this':
                hidden = False
                continue
            elif hide == 'after':
                # Hide all subsequent frames until a reset.
                hidden = True
            elif hide == 'after_and_this':
                hidden = True
                continue
            elif hide:
                continue
            elif hidden:
                continue
            new_frames.append(frame)
        if frames[-1] not in new_frames:
            # We must include the last frame; that we don't indicates
            # that the error happened where something was "hidden",
            # so we just have to show everything
            return frames
        return new_frames

    def pretty_string_repr(self, s):
        """
        Formats the string as a triple-quoted string when it contains
        newlines.
        """
        if '\n' in s:
            s = repr(s)
            # Turn 'abc' into '''abc''' and restore literal newlines.
            s = s[0]*3 + s[1:-1] + s[-1]*3
            s = s.replace('\\n', '\n')
            return s
        else:
            return repr(s)

    def long_item_list(self, lst):
        """
        Returns true if the list contains items that are long, and should
        be more nicely formatted.
        """
        how_many = 0
        for item in lst:
            if len(repr(item)) > 40:
                how_many += 1
                if how_many >= 3:
                    return True
        return False
+
class TextFormatter(AbstractFormatter):
    """Formats collected exception data as plain text (no markup)."""

    def quote(self, s):
        return s
    def quote_long(self, s):
        return s
    def emphasize(self, s):
        return s
    def format_sup_object(self, obj):
        return 'In object: %s' % self.emphasize(self.quote(repr(obj)))
    def format_sup_url(self, url):
        return 'URL: %s' % self.quote(url)
    def format_sup_line_pos(self, line, column):
        if column:
            return self.emphasize('Line %i, Column %i' % (line, column))
        else:
            return self.emphasize('Line %i' % line)
    def format_sup_expression(self, expr):
        return self.emphasize('In expression: %s' % self.quote(expr))
    def format_sup_warning(self, warning):
        return 'Warning: %s' % self.quote(warning)
    def format_sup_info(self, info):
        return [self.quote_long(info)]
    def format_source_line(self, filename, frame):
        return 'File %r, line %s in %s' % (
            filename, frame.lineno or '?', frame.name or '?')
    def format_long_source(self, source, long_source):
        # Plain text has no show/hide toggle; use the short form only.
        return self.format_source(source)
    def format_source(self, source_line):
        return '  ' + self.quote(source_line.strip())
    def format_exception_info(self, etype, evalue):
        return self.emphasize(
            '%s: %s' % (self.quote(etype), self.quote(evalue)))
    def format_traceback_info(self, info):
        return info

    def format_combine(self, data_by_importance, lines, exc_info):
        # Important items first, then frames, the exception itself, and
        # the remaining data groups in fixed order.
        lines[:0] = [value for n, value in data_by_importance['important']]
        lines.append(exc_info)
        for name in 'normal', 'supplemental', 'extra':
            lines.extend([value for n, value in data_by_importance[name]])
        return self.format_combine_lines(lines)

    def format_combine_lines(self, lines):
        return '\n'.join(lines)

    def format_extra_data(self, importance, title, value):
        # Render one extra_data item; layout depends on the value's type.
        if isinstance(value, str):
            s = self.pretty_string_repr(value)
            if '\n' in s:
                return '%s:\n%s' % (title, s)
            else:
                return '%s: %s' % (title, s)
        elif isinstance(value, dict):
            lines = ['\n', title, '-'*len(title)]
            # Python 2: dict.items() returns a sortable list.
            items = value.items()
            items.sort()
            for n, v in items:
                try:
                    v = repr(v)
                except Exception, e:
                    # repr() of arbitrary objects can itself fail.
                    v = 'Cannot display: %s' % e
                v = truncate(v)
                lines.append('  %s: %s' % (n, v))
            return '\n'.join(lines)
        elif (isinstance(value, (list, tuple))
              and self.long_item_list(value)):
            parts = [truncate(repr(v)) for v in value]
            return '%s: [\n    %s]' % (
                title, ',\n    '.join(parts))
        else:
            return '%s: %s' % (title, truncate(repr(value)))
+
+class HTMLFormatter(TextFormatter):
+
+ def quote(self, s):
+ return html_quote(s)
+ def quote_long(self, s):
+ return '<pre>%s</pre>' % self.quote(s)
+ def emphasize(self, s):
+ return '<b>%s</b>' % s
+ def format_sup_url(self, url):
+ return 'URL: <a href="%s">%s</a>' % (url, url)
+ def format_combine_lines(self, lines):
+ return '<br>\n'.join(lines)
+ def format_source_line(self, filename, frame):
+ name = self.quote(frame.name or '?')
+ return 'Module <span class="module" title="%s">%s</span>:<b>%s</b> in <code>%s</code>' % (
+ filename, frame.modname or '?', frame.lineno or '?',
+ name)
+ return 'File %r, line %s in <tt>%s</tt>' % (
+ filename, frame.lineno, name)
+ def format_long_source(self, source, long_source):
+ q_long_source = str2html(long_source, False, 4, True)
+ q_source = str2html(source, True, 0, False)
+ return ('<code style="display: none" class="source" source-type="long"><a class="switch_source" onclick="return switch_source(this, \'long\')" href="#">&lt;&lt;&nbsp; </a>%s</code>'
+ '<code class="source" source-type="short"><a onclick="return switch_source(this, \'short\')" class="switch_source" href="#">&gt;&gt;&nbsp; </a>%s</code>'
+ % (q_long_source,
+ q_source))
+ def format_source(self, source_line):
+ return '&nbsp;&nbsp;<code class="source">%s</code>' % self.quote(source_line.strip())
+ def format_traceback_info(self, info):
+ return '<pre>%s</pre>' % self.quote(info)
+
+ def format_extra_data(self, importance, title, value):
+ if isinstance(value, str):
+ s = self.pretty_string_repr(value)
+ if '\n' in s:
+ return '%s:<br><pre>%s</pre>' % (title, self.quote(s))
+ else:
+ return '%s: <tt>%s</tt>' % (title, self.quote(s))
+ elif isinstance(value, dict):
+ return self.zebra_table(title, value)
+ elif (isinstance(value, (list, tuple))
+ and self.long_item_list(value)):
+ return '%s: <tt>[<br>\n&nbsp; &nbsp; %s]</tt>' % (
+ title, ',<br>&nbsp; &nbsp; '.join(map(self.quote, map(repr, value))))
+ else:
+ return '%s: <tt>%s</tt>' % (title, self.quote(repr(value)))
+
+ def format_combine(self, data_by_importance, lines, exc_info):
+ lines[:0] = [value for n, value in data_by_importance['important']]
+ lines.append(exc_info)
+ for name in 'normal', 'supplemental':
+ lines.extend([value for n, value in data_by_importance[name]])
+ if data_by_importance['extra']:
+ lines.append(
+ '<script type="text/javascript">\nshow_button(\'extra_data\', \'extra data\');\n</script>\n' +
+ '<div id="extra_data" class="hidden-data">\n')
+ lines.extend([value for n, value in data_by_importance['extra']])
+ lines.append('</div>')
+ text = self.format_combine_lines(lines)
+ if self.include_reusable:
+ return error_css + hide_display_js + text
+ else:
+ # Usually because another error is already on this page,
+ # and so the js & CSS are unneeded
+ return text
+
+ def zebra_table(self, title, rows, table_class="variables"):
+ if isinstance(rows, dict):
+ rows = rows.items()
+ rows.sort()
+ table = ['<table class="%s">' % table_class,
+ '<tr class="header"><th colspan="2">%s</th></tr>'
+ % self.quote(title)]
+ odd = False
+ for name, value in rows:
+ try:
+ value = repr(value)
+ except Exception, e:
+ value = 'Cannot print: %s' % e
+ odd = not odd
+ table.append(
+ '<tr class="%s"><td>%s</td>'
+ % (odd and 'odd' or 'even', self.quote(name)))
+ table.append(
+ '<td><tt>%s</tt></td></tr>'
+ % make_wrappable(self.quote(truncate(value))))
+ table.append('</table>')
+ return '\n'.join(table)
+
# JavaScript embedded in HTML error pages: hide_display() toggles a
# hidden-data element, show_button() writes a toggle link, and
# switch_source() swaps the short/long source excerpts produced by
# HTMLFormatter.format_long_source().  Emitted once per page when
# include_reusable is set.
hide_display_js = r'''
<script type="text/javascript">
function hide_display(id) {
    var el = document.getElementById(id);
    if (el.className == "hidden-data") {
        el.className = "";
        return true;
    } else {
        el.className = "hidden-data";
        return false;
    }
}
document.write('<style type="text/css">\n');
document.write('.hidden-data {display: none}\n');
document.write('</style>\n');
function show_button(toggle_id, name) {
    document.write('<a href="#' + toggle_id
        + '" onclick="javascript:hide_display(\'' + toggle_id
        + '\')" class="button">' + name + '</a><br>');
}

function switch_source(el, hide_type) {
    while (el) {
        if (el.getAttribute &&
            el.getAttribute('source-type') == hide_type) {
            break;
        }
        el = el.parentNode;
    }
    if (! el) {
        return false;
    }
    el.style.display = 'none';
    if (hide_type == 'long') {
        while (el) {
            if (el.getAttribute &&
                el.getAttribute('source-type') == 'short') {
                break;
            }
            el = el.nextSibling;
        }
    } else {
        while (el) {
            if (el.getAttribute &&
                el.getAttribute('source-type') == 'long') {
                break;
            }
            el = el.previousSibling;
        }
    }
    if (el) {
        el.style.display = '';
    }
    return false;
}

</script>'''


# Stylesheet for the generated error page (tables of variables, toggle
# buttons, source highlighting); prepended alongside hide_display_js when
# include_reusable is set.
error_css = """
<style type="text/css">
body {
  font-family: Helvetica, sans-serif;
}

table {
  width: 100%;
}

tr.header {
  background-color: #006;
  color: #fff;
}

tr.even {
  background-color: #ddd;
}

table.variables td {
  vertical-align: top;
  overflow: auto;
}

a.button {
  background-color: #ccc;
  border: 2px outset #aaa;
  color: #000;
  text-decoration: none;
}

a.button:hover {
  background-color: #ddd;
}

code.source {
  color: #006;
}

a.switch_source {
  color: #090;
  text-decoration: none;
}

a.switch_source:hover {
  background-color: #ddd;
}

.source-highlight {
  background-color: #ff9;
}

</style>
"""
+
def format_html(exc_data, include_hidden_frames=False, **ops):
    """Format ``exc_data`` as HTML using HTMLFormatter.

    With ``include_hidden_frames`` the filtered report is followed by a
    hidden full traceback and a hidden plain-text version, each behind a
    show/hide button (see hide_display_js).  Remaining keyword options
    are passed through to the formatter.
    """
    if not include_hidden_frames:
        return HTMLFormatter(**ops).format_collected_data(exc_data)
    short_er = format_html(exc_data, show_hidden_frames=False, **ops)
    # @@: This should have a way of seeing if the previous traceback
    # was actually trimmed at all
    # The reusable CSS/JS was already emitted with the short report.
    ops['include_reusable'] = False
    ops['show_extra_data'] = False
    long_er = format_html(exc_data, show_hidden_frames=True, **ops)
    text_er = format_text(exc_data, show_hidden_frames=True, **ops)
    return """
    %s
    <br>
    <script type="text/javascript">
    show_button('full_traceback', 'full traceback')
    </script>
    <div id="full_traceback" class="hidden-data">
    %s
    </div>
    <br>
    <script type="text/javascript">
    show_button('text_version', 'text version')
    </script>
    <div id="text_version" class="hidden-data">
    <textarea style="width: 100%%" rows=10 cols=60>%s</textarea>
    </div>
    """ % (short_er, long_er, cgi.escape(text_er))
+
def format_text(exc_data, **ops):
    """Format ``exc_data`` as plain text; options go to TextFormatter."""
    text_formatter = TextFormatter(**ops)
    return text_formatter.format_collected_data(exc_data)
+
# Patterns used to post-process PySourceColor output in _str2html():
whitespace_re = re.compile(r' +')              # runs of spaces -> &nbsp; padding
pre_re = re.compile(r'</?pre.*?>')             # strip the <pre> wrapper
error_re = re.compile(r'<h3>ERROR: .*?</h3>')  # strip colorizer error banners
+
def str2html(src, strip=False, indent_subsequent=0,
             highlight_inner=False):
    """
    Convert a string to HTML.  Try to be really safe about it,
    returning a quoted version of the string if nothing else works.
    """
    try:
        return _str2html(src, strip=strip,
                         indent_subsequent=indent_subsequent,
                         highlight_inner=highlight_inner)
    except Exception:
        # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.  Any other failure falls back to plain quoting.
        return html_quote(src)
+
def _str2html(src, strip=False, indent_subsequent=0,
              highlight_inner=False):
    """Colorize ``src`` with PySourceColor and massage it for inline HTML.

    ``indent_subsequent`` indents every line after the first;
    ``highlight_inner`` wraps the middle line in a highlight span (used to
    mark the failing line inside a source excerpt).
    """
    if strip:
        src = src.strip()
    orig_src = src
    try:
        src = PySourceColor.str2html(src, form='snip')
        src = error_re.sub('', src)
        src = pre_re.sub('', src)
        # Drop at most one leading and one trailing newline.
        src = re.sub(r'^[\n\r]{0,1}', '', src)
        src = re.sub(r'[\n\r]{0,1}$', '', src)
    except Exception:
        # Was a bare ``except:``; narrowed.  If colorizing fails for any
        # reason, fall back to the plainly quoted original.
        src = html_quote(orig_src)
    lines = src.splitlines()
    if len(lines) == 1:
        return lines[0]
    indent = ' '*indent_subsequent
    for i in range(1, len(lines)):
        lines[i] = indent+lines[i]
        if highlight_inner and i == len(lines)/2:
            lines[i] = '<span class="source-highlight">%s</span>' % lines[i]
    src = '<br>\n'.join(lines)
    # Preserve runs of spaces: all but the last become &nbsp;.
    src = whitespace_re.sub(
        lambda m: '&nbsp;'*(len(m.group(0))-1) + ' ', src)
    return src
+
def truncate(string, limit=1000):
    """Return ``string`` unchanged when it fits within ``limit``
    characters; otherwise keep the head plus the final 17 characters,
    joined by an ellipsis."""
    if len(string) <= limit:
        return string
    # limit-20 head characters + '...' + 17 tail characters == limit total.
    return '%s...%s' % (string[:limit - 20], string[-17:])
+
def make_wrappable(html, wrap_limit=60,
                   split_on=';?&@!$#-/\\"\''):
    """Insert <wbr> break opportunities into overlong words of ``html``.

    Words longer than ``wrap_limit`` are split after the first character
    from ``split_on`` found in them (the character is kept); words with
    no such character are chopped into fixed-size chunks.
    """
    # Currently using <wbr>, maybe should use &#8203;
    # http://www.cs.tut.fi/~jkorpela/html/nobr.html
    if len(html) <= wrap_limit:
        return html
    words = html.split()
    new_words = []
    for word in words:
        wrapped_word = ''
        while len(word) > wrap_limit:
            for char in split_on:
                if char in word:
                    # Split once at the first splittable character; keep
                    # the character and allow a break right after it.
                    first, rest = word.split(char, 1)
                    wrapped_word += first+char+'<wbr>'
                    word = rest
                    break
            else:
                # No splittable character: hard-chop into wrap_limit chunks.
                for i in range(0, len(word), wrap_limit):
                    wrapped_word += word[i:i+wrap_limit]+'<wbr>'
                word = ''
        wrapped_word += word
        new_words.append(wrapped_word)
    # NOTE: original inter-word whitespace is collapsed to single spaces.
    return ' '.join(new_words)
+
def make_pre_wrappable(html, wrap_limit=60,
                       split_on=';?&@!$#-/\\"\''):
    """
    Like ``make_wrappable()`` but intended for text that will
    go in a ``<pre>`` block, so wrap on a line-by-line basis.
    """
    lines = html.splitlines()
    new_lines = []
    for line in lines:
        if len(line) > wrap_limit:
            for char in split_on:
                if char in line:
                    parts = line.split(char)
                    # NOTE(review): this drops the split character itself,
                    # unlike make_wrappable() which keeps it -- confirm
                    # whether that is intentional.
                    line = '<wbr>'.join(parts)
                    break
        new_lines.append(line)
    # Bug fix: previously returned '\n'.join(lines) -- the original,
    # unwrapped lines -- so all the work above was discarded.
    return '\n'.join(new_lines)
diff --git a/paste/exceptions/reporter.py b/paste/exceptions/reporter.py
new file mode 100644
index 0000000..95e31ba
--- /dev/null
+++ b/paste/exceptions/reporter.py
@@ -0,0 +1,141 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+from email.MIMEText import MIMEText
+from email.MIMEMultipart import MIMEMultipart
+import smtplib
+import time
+try:
+ from socket import sslerror
+except ImportError:
+ sslerror = None
+from paste.exceptions import formatter
+
class Reporter(object):
    """Base class for exception reporters.

    Constructor keywords override same-named attributes on the instance;
    an unknown keyword raises TypeError.  Subclasses implement
    ``report(exc_data)`` and may validate settings in ``check_params()``.
    """

    def __init__(self, **conf):
        for name, value in conf.items():
            if hasattr(self, name):
                setattr(self, name, value)
            else:
                raise TypeError(
                    "The keyword argument %s was not expected"
                    % name)
        self.check_params()

    def check_params(self):
        """Hook for subclasses to validate their configuration."""
        pass

    def format_date(self, exc_data):
        """Render the exception's timestamp in the locale's format."""
        return time.strftime('%c', exc_data.date)

    def format_html(self, exc_data, **kw):
        """HTML rendering of the collected exception data."""
        return formatter.format_html(exc_data, **kw)

    def format_text(self, exc_data, **kw):
        """Plain-text rendering of the collected exception data."""
        return formatter.format_text(exc_data, **kw)
+
class EmailReporter(Reporter):
    """Reporter that emails exception reports over SMTP.

    Sends a multipart/alternative message with a plain-text part, a
    short HTML part, and a full-traceback HTML part.
    """

    # Required: one address or a list of addresses.
    to_addresses = None
    # Required: the envelope/From address.
    from_address = None
    smtp_server = 'localhost'
    smtp_username = None
    smtp_password = None
    smtp_use_tls = False
    # Prepended verbatim to the Subject line.
    subject_prefix = ''

    def report(self, exc_data):
        msg = self.assemble_email(exc_data)
        server = smtplib.SMTP(self.smtp_server)
        if self.smtp_use_tls:
            server.ehlo()
            server.starttls()
            server.ehlo()
        if self.smtp_username and self.smtp_password:
            server.login(self.smtp_username, self.smtp_password)
        server.sendmail(self.from_address,
                        self.to_addresses, msg.as_string())
        try:
            server.quit()
        except sslerror:
            # sslerror is raised in tls connections on closing sometimes
            # NOTE(review): ``sslerror`` may be None (import fallback at
            # module top); ``except None:`` would not behave -- confirm.
            pass

    def check_params(self):
        if not self.to_addresses:
            raise ValueError("You must set to_addresses")
        if not self.from_address:
            raise ValueError("You must set from_address")
        # Accept a single address string (Python 2 str/unicode).
        if isinstance(self.to_addresses, (str, unicode)):
            self.to_addresses = [self.to_addresses]

    def assemble_email(self, exc_data):
        """Build the MIME message (text + short HTML + long HTML parts)."""
        short_html_version = self.format_html(
            exc_data, show_hidden_frames=False)
        long_html_version = self.format_html(
            exc_data, show_hidden_frames=True)
        text_version = self.format_text(
            exc_data, show_hidden_frames=False)
        msg = MIMEMultipart()
        msg.set_type('multipart/alternative')
        msg.preamble = msg.epilogue = ''
        text_msg = MIMEText(text_version)
        text_msg.set_type('text/plain')
        text_msg.set_param('charset', 'ASCII')
        msg.attach(text_msg)
        html_msg = MIMEText(short_html_version)
        html_msg.set_type('text/html')
        # @@: Correct character set?
        html_msg.set_param('charset', 'UTF-8')
        html_long = MIMEText(long_html_version)
        html_long.set_type('text/html')
        html_long.set_param('charset', 'UTF-8')
        msg.attach(html_msg)
        msg.attach(html_long)
        subject = '%s: %s' % (exc_data.exception_type,
                              formatter.truncate(str(exc_data.exception_value)))
        msg['Subject'] = self.subject_prefix + subject
        msg['From'] = self.from_address
        msg['To'] = ', '.join(self.to_addresses)
        return msg
+
class LogReporter(Reporter):
    """Reporter that appends text reports to a log file."""

    # Required: path of the log file (opened in append mode per report).
    filename = None
    # Include frames hidden by traceback_hide markers.
    show_hidden_frames = True

    def check_params(self):
        assert self.filename is not None, (
            "You must give a filename")

    def report(self, exc_data):
        text = self.format_text(
            exc_data, show_hidden_frames=self.show_hidden_frames)
        f = open(self.filename, 'a')
        try:
            # Each report is followed by a dashed separator line.
            f.write(text + '\n' + '-'*60 + '\n')
        finally:
            f.close()
+
class FileReporter(Reporter):
    """Reporter that writes text reports to an already-open file object."""

    # Required: a writable file-like object (never closed here).
    file = None
    # Include frames hidden by traceback_hide markers.
    show_hidden_frames = True

    def check_params(self):
        assert self.file is not None, (
            "You must give a file object")

    def report(self, exc_data):
        text = self.format_text(
            exc_data, show_hidden_frames=self.show_hidden_frames)
        # Same dashed separator convention as LogReporter.
        self.file.write(text + '\n' + '-'*60 + '\n')
+
class WSGIAppReporter(Reporter):
    """A WSGI application that serves one captured exception as an HTML
    500 page (note: overrides Reporter.__init__ entirely)."""

    def __init__(self, exc_data):
        self.exc_data = exc_data

    def __call__(self, environ, start_response):
        start_response('500 Server Error', [('Content-type', 'text/html')])
        return [formatter.format_html(self.exc_data)]
diff --git a/paste/exceptions/serial_number_generator.py b/paste/exceptions/serial_number_generator.py
new file mode 100644
index 0000000..0289663
--- /dev/null
+++ b/paste/exceptions/serial_number_generator.py
@@ -0,0 +1,123 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Creates a human-readable identifier, using numbers and digits,
+avoiding ambiguous numbers and letters. hash_identifier can be used
+to create compact representations that are unique for a certain string
+(or concatenation of strings)
+"""
+
try:
    from hashlib import md5
except ImportError:
    # Python < 2.5 fallback
    from md5 import md5

# Alphabet with visually ambiguous characters removed (no 0/o/1/l/i/s).
good_characters = "23456789abcdefghjkmnpqrtuvwxyz"

base = len(good_characters)

def make_identifier(number):
    """
    Encodes a non-negative integer as an identifier string drawn from
    ``good_characters`` (least-significant digit first); 0 encodes as ''.

    Raises ValueError for non-integers and for negative numbers.
    """
    # Accept Python 2 ``long`` as well; on Python 3 plain int suffices.
    try:
        integer_types = (int, long)
    except NameError:
        integer_types = (int,)
    if not isinstance(number, integer_types):
        raise ValueError(
            "You can only make identifiers out of integers (not %r)"
            % number)
    if number < 0:
        raise ValueError(
            "You cannot make identifiers out of negative numbers: %r"
            % number)
    result = []
    while number:
        digit = number % base
        result.append(good_characters[digit])
        # Explicit floor division so true division (Python 3 or
        # ``from __future__ import division``) cannot break the loop.
        number = number // base
    return ''.join(result)

def hash_identifier(s, length, pad=True, hasher=md5, prefix='',
                    group=None, upper=False):
    """
    Hashes the string (with the given hashing module), then turns that
    hash into an identifier of the given length (using modulo to
    reduce the length of the identifier).  If ``pad`` is False, then
    the minimum-length identifier will be used; otherwise the
    identifier will be padded with 0's as necessary.

    ``prefix`` will be added last, and does not count towards the
    target length.  ``group`` will group the characters with ``-`` in
    the given lengths, and also does not count towards the target
    length.  E.g., ``group=4`` will cause a identifier like
    ``a5f3-hgk3-asdf``.  Grouping occurs before the prefix.
    """
    if not callable(hasher):
        # Accept sha/md5 modules as well as callables
        hasher = hasher.new
    if length > 26 and hasher is md5:
        raise ValueError(
            "md5 cannot create hashes longer than 26 characters in "
            "length (you gave %s)" % length)
    # Normalize to bytes so hashing works on both Python 2 and 3 and
    # produces the same digest for the same logical input.
    try:
        text_type = unicode
    except NameError:
        text_type = str
    if isinstance(s, text_type):
        s = s.encode('utf-8')
    if not isinstance(s, bytes):
        s = str(s).encode('utf-8')
    h = hasher(s)
    bin_hash = h.digest()
    modulo = base ** length
    number = 0
    # bytearray yields ints on both Python 2 and 3 (replaces ord(c)).
    for byte in bytearray(bin_hash):
        number = (number * 256 + byte) % modulo
    ident = make_identifier(number)
    if pad:
        # Left-pad with the alphabet's zero character up to ``length``.
        ident = good_characters[0]*(length-len(ident)) + ident
    if group:
        # Split into dash-separated groups from the right.
        parts = []
        while ident:
            parts.insert(0, ident[-group:])
            ident = ident[:-group]
        ident = '-'.join(parts)
    if upper:
        ident = ident.upper()
    return prefix + ident
+
# doctest tests:
# (expected values depend on the md5 digest and the good_characters
# alphabet above; they double as a regression suite via `python -m` run)
__test__ = {
    'make_identifier': """
    >>> make_identifier(0)
    ''
    >>> make_identifier(1000)
    'c53'
    >>> make_identifier(-100)
    Traceback (most recent call last):
        ...
    ValueError: You cannot make identifiers out of negative numbers: -100
    >>> make_identifier('test')
    Traceback (most recent call last):
        ...
    ValueError: You can only make identifiers out of integers (not 'test')
    >>> make_identifier(1000000000000)
    'c53x9rqh3'
    """,
    'hash_identifier': """
    >>> hash_identifier(0, 5)
    'cy2dr'
    >>> hash_identifier(0, 10)
    'cy2dr6rg46'
    >>> hash_identifier('this is a test of a long string', 5)
    'awatu'
    >>> hash_identifier(0, 26)
    'cy2dr6rg46cx8t4w2f3nfexzk4'
    >>> hash_identifier(0, 30)
    Traceback (most recent call last):
        ...
    ValueError: md5 cannot create hashes longer than 26 characters in length (you gave 30)
    >>> hash_identifier(0, 10, group=4)
    'cy-2dr6-rg46'
    >>> hash_identifier(0, 10, group=4, upper=True, prefix='M-')
    'M-CY-2DR6-RG46'
    """}

if __name__ == '__main__':
    import doctest
    doctest.testmod()
+
diff --git a/paste/fileapp.py b/paste/fileapp.py
new file mode 100644
index 0000000..8432511
--- /dev/null
+++ b/paste/fileapp.py
@@ -0,0 +1,354 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Ian Bicking, Clark C. Evans and contributors
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+This module handles sending static content such as in-memory data or
+files. At this time it has cache helpers and understands the
+if-modified-since request header.
+"""
+
+import os, time, mimetypes, zipfile, tarfile
+from paste.httpexceptions import *
+from paste.httpheaders import *
+
# Files smaller than CACHE_SIZE bytes are read fully into memory by
# FileApp.update(); larger files are streamed from disk.
CACHE_SIZE = 4096
# Chunk size used when streaming file content (see _FileIter).
BLOCK_SIZE = 4096 * 16

__all__ = ['DataApp', 'FileApp', 'DirectoryApp', 'ArchiveStore']
+
class DataApp(object):
    """
    Returns an application that will send content in a single chunk,
    this application has support for setting cache-control and for
    responding to conditional (or HEAD) requests.

    Constructor Arguments:

        ``content``     the content being sent to the client

        ``headers``     the headers to send /w the response

        The remaining ``kwargs`` correspond to headers, where the
        underscore is replaced with a dash.  These values are only
        added to the headers if they are not already provided; thus,
        they can be used for default values.  Examples include, but
        are not limited to:

            ``content_type``
            ``content_encoding``
            ``content_location``

    ``cache_control()``

        This method provides validated construction of the ``Cache-Control``
        header as well as providing for automated filling out of the
        ``EXPIRES`` header for HTTP/1.0 clients.

    ``set_content()``

        This method provides a mechanism to set the content after the
        application has been constructed.  This method does things
        like changing ``Last-Modified`` and ``Content-Length`` headers.

    """

    # Methods answered directly; anything else gets 405 Method Not Allowed.
    allowed_methods = ('GET', 'HEAD')

    def __init__(self, content, headers=None, allowed_methods=None,
                 **kwargs):
        assert isinstance(headers, (type(None), list))
        self.expires = None
        self.content = None
        self.content_length = None
        self.last_modified = 0
        if allowed_methods is not None:
            self.allowed_methods = allowed_methods
        self.headers = headers or []
        # Each kwarg maps to a header object from paste.httpheaders
        # (underscores become dashes), used as a default value.
        for (k, v) in kwargs.items():
            header = get_header(k)
            header.update(self.headers, v)
        ACCEPT_RANGES.update(self.headers, bytes=True)
        if not CONTENT_TYPE(self.headers):
            CONTENT_TYPE.update(self.headers)
        if content is not None:
            self.set_content(content)

    def cache_control(self, **kwargs):
        # Returns self so calls can be chained fluently.
        self.expires = CACHE_CONTROL.apply(self.headers, **kwargs) or None
        return self

    def set_content(self, content, last_modified=None):
        """Replace the body; refreshes Last-Modified and Content-Length."""
        assert content is not None
        if last_modified is None:
            self.last_modified = time.time()
        else:
            self.last_modified = last_modified
        self.content = content
        self.content_length = len(content)
        LAST_MODIFIED.update(self.headers, time=self.last_modified)
        return self

    def content_disposition(self, **kwargs):
        # Validated Content-Disposition construction; chainable.
        CONTENT_DISPOSITION.apply(self.headers, **kwargs)
        return self

    def __call__(self, environ, start_response):
        method = environ['REQUEST_METHOD'].upper()
        if method not in self.allowed_methods:
            exc = HTTPMethodNotAllowed(
                'You cannot %s a file' % method,
                headers=[('Allow', ','.join(self.allowed_methods))])
            return exc(environ, start_response)
        return self.get(environ, start_response)

    def calculate_etag(self):
        # Weak uniqueness: mtime + length is enough for static content.
        return '"%s-%s"' % (self.last_modified, self.content_length)

    def get(self, environ, start_response):
        """Serve the body, honoring ETag/If-Modified-Since and Range.

        Returns a list (complete response) or, when the content is not
        held in memory, a ``(lower, content_length)`` tuple telling the
        caller (FileApp.get) which slice of the file to stream.
        """
        headers = self.headers[:]
        current_etag = self.calculate_etag()
        ETAG.update(headers, current_etag)
        if self.expires is not None:
            EXPIRES.update(headers, delta=self.expires)

        try:
            client_etags = IF_NONE_MATCH.parse(environ)
            if client_etags:
                for etag in client_etags:
                    if etag == current_etag or etag == '*':
                        # horribly inefficient, n^2 performance, yuck!
                        for head in list_headers(entity=True):
                            head.delete(headers)
                        start_response('304 Not Modified', headers)
                        return ['']
        except HTTPBadRequest, exce:
            return exce.wsgi_application(environ, start_response)

        # If we get If-None-Match and If-Modified-Since, and
        # If-None-Match doesn't match, then we should not try to
        # figure out If-Modified-Since (which has 1-second granularity
        # and just isn't as accurate)
        if not client_etags:
            try:
                client_clock = IF_MODIFIED_SINCE.parse(environ)
                if client_clock >= int(self.last_modified):
                    # horribly inefficient, n^2 performance, yuck!
                    for head in list_headers(entity=True):
                        head.delete(headers)
                    start_response('304 Not Modified', headers)
                    return ['']  # empty body
            except HTTPBadRequest, exce:
                return exce.wsgi_application(environ, start_response)

        # Only single-range 'bytes' requests are honored.
        (lower, upper) = (0, self.content_length - 1)
        range = RANGE.parse(environ)
        if range and 'bytes' == range[0] and 1 == len(range[1]):
            (lower, upper) = range[1][0]
            upper = upper or (self.content_length - 1)
            if upper >= self.content_length or lower > upper:
                return HTTPRequestRangeNotSatisfiable((
                    "Range request was made beyond the end of the content,\r\n"
                    "which is %s long.\r\n  Range: %s\r\n") % (
                    self.content_length, RANGE(environ))
                ).wsgi_application(environ, start_response)

        content_length = upper - lower + 1
        CONTENT_RANGE.update(headers, first_byte=lower, last_byte=upper,
                             total_length = self.content_length)
        CONTENT_LENGTH.update(headers, content_length)
        if content_length == self.content_length:
            start_response('200 OK', headers)
        else:
            start_response('206 Partial Content', headers)
        if self.content is not None:
            return [self.content[lower:upper+1]]
        # Content not in memory: tell the subclass what slice to stream.
        return (lower, content_length)
+
class FileApp(DataApp):
    """
    Returns an application that will send the file at the given
    filename.  Adds a mime type based on ``mimetypes.guess_type()``.
    See DataApp for the arguments beyond ``filename``.
    """

    def __init__(self, filename, headers=None, **kwargs):
        self.filename = filename
        content_type, content_encoding = self.guess_type()
        if content_type and 'content_type' not in kwargs:
            kwargs['content_type'] = content_type
        if content_encoding and 'content_encoding' not in kwargs:
            kwargs['content_encoding'] = content_encoding
        # Content is loaded lazily by update() on first request.
        DataApp.__init__(self, None, headers, **kwargs)

    def guess_type(self):
        # Returns (content_type, content_encoding) from the file name.
        return mimetypes.guess_type(self.filename)

    def update(self, force=False):
        """Re-stat the file; (re)load small files into memory."""
        stat = os.stat(self.filename)
        if not force and stat.st_mtime == self.last_modified:
            return
        self.last_modified = stat.st_mtime
        if stat.st_size < CACHE_SIZE:
            fh = open(self.filename,"rb")
            self.set_content(fh.read(), stat.st_mtime)
            fh.close()
        else:
            # Too large to cache: stream from disk on each request.
            self.content = None
            self.content_length = stat.st_size
            # This is updated automatically if self.set_content() is
            # called
            LAST_MODIFIED.update(self.headers, time=self.last_modified)

    def get(self, environ, start_response):
        is_head = environ['REQUEST_METHOD'].upper() == 'HEAD'
        if 'max-age=0' in CACHE_CONTROL(environ).lower():
            self.update(force=True) # RFC 2616 13.2.6
        else:
            self.update()
        if not self.content:
            if not os.path.exists(self.filename):
                exc = HTTPNotFound(
                    'The resource does not exist',
                    comment="No file at %r" % self.filename)
                return exc(environ, start_response)
            try:
                file = open(self.filename, 'rb')
            except (IOError, OSError), e:
                exc = HTTPForbidden(
                    'You are not permitted to view this file (%s)' % e)
                return exc.wsgi_application(
                    environ, start_response)
        retval = DataApp.get(self, environ, start_response)
        if isinstance(retval, list):
            # cached content, exception, or not-modified
            # NOTE(review): if ``file`` was opened above, this early
            # return leaves it unclosed -- confirm whether that leak
            # matters for the 304/exception paths.
            if is_head:
                return ['']
            return retval
        # DataApp.get() returned (offset, length): stream that slice.
        (lower, content_length) = retval
        if is_head:
            return ['']
        file.seek(lower)
        file_wrapper = environ.get('wsgi.file_wrapper', None)
        if file_wrapper:
            return file_wrapper(file, BLOCK_SIZE)
        else:
            return _FileIter(file, size=content_length)
+
+class _FileIter(object):
+
+ def __init__(self, file, block_size=None, size=None):
+ self.file = file
+ self.size = size
+ self.block_size = block_size or BLOCK_SIZE
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ chunk_size = self.block_size
+ if self.size is not None:
+ if chunk_size > self.size:
+ chunk_size = self.size
+ self.size -= chunk_size
+ data = self.file.read(chunk_size)
+ if not data:
+ raise StopIteration
+ return data
+
+ def close(self):
+ self.file.close()
+
+
class DirectoryApp(object):
    """
    Returns an application that dispatches requests to corresponding FileApps based on PATH_INFO.
    FileApp instances are cached.  This app makes sure not to serve any files that are not in a subdirectory.
    To customize FileApp creation override ``DirectoryApp.make_fileapp``
    """

    def __init__(self, path):
        # Normalize to an absolute path with a trailing separator so the
        # startswith() containment check below cannot be fooled by
        # sibling directories sharing a prefix.
        self.path = os.path.abspath(path)
        if not self.path.endswith(os.path.sep):
            self.path += os.path.sep
        assert os.path.isdir(self.path)
        # PATH_INFO -> FileApp cache.  NOTE(review): unbounded; grows with
        # every distinct file path requested.
        self.cached_apps = {}

    make_fileapp = FileApp

    def __call__(self, environ, start_response):
        path_info = environ['PATH_INFO']
        app = self.cached_apps.get(path_info)
        if app is None:
            path = os.path.join(self.path, path_info.lstrip('/'))
            # Reject anything that normalizes to outside the base dir
            # (e.g. '..' traversal).
            if not os.path.normpath(path).startswith(self.path):
                app = HTTPForbidden()
            elif os.path.isfile(path):
                app = self.make_fileapp(path)
                self.cached_apps[path_info] = app
            else:
                app = HTTPNotFound(comment=path)
        return app(environ, start_response)
+
+
class ArchiveStore(object):
    """
    Returns an application that serves up a DataApp for items requested
    in a given zip or tar archive.

    Constructor Arguments:

    ``filepath`` the path to the archive being served

    ``cache_control()``

    This method provides validated construction of the ``Cache-Control``
    header as well as providing for automated filling out of the
    ``EXPIRES`` header for HTTP/1.0 clients.
    """

    def __init__(self, filepath):
        if zipfile.is_zipfile(filepath):
            self.archive = zipfile.ZipFile(filepath,"r")
        elif tarfile.is_tarfile(filepath):
            # NOTE(review): TarFileCompat no longer exists on Python 3;
            # this branch only works on Python 2 -- confirm.
            self.archive = tarfile.TarFileCompat(filepath,"r")
        else:
            raise AssertionError("filepath '%s' is not a zip or tar " % filepath)
        self.expires = None
        self.last_modified = time.time()
        # path -> DataApp cache of already-extracted members.
        self.cache = {}
        # Bug fix: cache_control() updates self.headers, but no headers
        # list was ever created, so calling it raised AttributeError.
        self.headers = []

    def cache_control(self, **kwargs):
        # Chainable, mirroring DataApp.cache_control().
        self.expires = CACHE_CONTROL.apply(self.headers, **kwargs) or None
        return self

    def __call__(self, environ, start_response):
        path = environ.get("PATH_INFO","")
        if path.startswith("/"):
            path = path[1:]
        application = self.cache.get(path)
        if application:
            return application(environ, start_response)
        try:
            info = self.archive.getinfo(path)
        except KeyError:
            exc = HTTPNotFound("The file requested, '%s', was not found." % path)
            return exc.wsgi_application(environ, start_response)
        if info.filename.endswith("/"):
            exc = HTTPNotFound("Path requested, '%s', is not a file." % path)
            return exc.wsgi_application(environ, start_response)
        content_type, content_encoding = mimetypes.guess_type(info.filename)
        # 'None' is not a valid content-encoding, so don't set the header if
        # mimetypes.guess_type returns None
        if content_encoding is not None:
            app = DataApp(None, content_type = content_type,
                          content_encoding = content_encoding)
        else:
            app = DataApp(None, content_type = content_type)
        app.set_content(self.archive.read(path),
                        time.mktime(info.date_time + (0,0,0)))
        self.cache[path] = app
        app.expires = self.expires
        return app(environ, start_response)
+
diff --git a/paste/fixture.py b/paste/fixture.py
new file mode 100644
index 0000000..242a1de
--- /dev/null
+++ b/paste/fixture.py
@@ -0,0 +1,1725 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Routines for testing WSGI applications.
+
+Most interesting is the `TestApp <class-paste.fixture.TestApp.html>`_
+for testing WSGI applications, and the `TestFileEnvironment
+<class-paste.fixture.TestFileEnvironment.html>`_ class for testing the
+effects of command-line scripts.
+"""
+
+import sys
+import random
+import urllib
+import urlparse
+import mimetypes
+import time
+import cgi
+import os
+import shutil
+import smtplib
+import shlex
+from Cookie import BaseCookie
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+import re
+try:
+ import subprocess
+except ImportError:
+ from paste.util import subprocess24 as subprocess
+
+from paste import wsgilib
+from paste import lint
+from paste.response import HeaderDict
+
+def tempnam_no_warning(*args):
+    """
+    An os.tempnam with the warning turned off, because sometimes
+    you just need to use this and don't care about the stupid
+    security warning.
+    """
+    # NOTE(review): despite the docstring, nothing here actually
+    # suppresses the RuntimeWarning that os.tempnam emits — this is a
+    # plain pass-through; confirm whether suppression was intended.
+    return os.tempnam(*args)
+
+class NoDefault(object):
+ pass
+
+def sorted(l):
+    # Compatibility shim that deliberately shadows the ``sorted``
+    # builtin (which does not exist before Python 2.4): returns a new
+    # sorted list, leaving the argument untouched.
+    l = list(l)
+    l.sort()
+    return l
+
+class Dummy_smtplib(object):
+
+    """
+    Deprecated stand-in for ``smtplib.SMTP`` that records the last
+    message instead of sending it.  ``install()`` monkeypatches
+    ``smtplib.SMTP``; the single live instance is kept in the class
+    attribute ``existing`` and must be ``reset()`` between uses.
+    """
+
+    # The one outstanding instance, or None.
+    existing = None
+
+    def __init__(self, server):
+        import warnings
+        warnings.warn(
+            'Dummy_smtplib is not maintained and is deprecated',
+            DeprecationWarning, 2)
+        # Only one un-reset instance may exist at a time.
+        assert not self.existing, (
+            "smtplib.SMTP() called again before Dummy_smtplib.existing.reset() "
+            "called.")
+        self.server = server
+        self.open = True
+        self.__class__.existing = self
+
+    def quit(self):
+        assert self.open, (
+            "Called %s.quit() twice" % self)
+        self.open = False
+
+    def sendmail(self, from_address, to_addresses, msg):
+        # Record the message instead of delivering it.
+        self.from_address = from_address
+        self.to_addresses = to_addresses
+        self.message = msg
+
+    def install(cls):
+        # Monkeypatch smtplib so application code under test uses us.
+        smtplib.SMTP = cls
+
+    install = classmethod(install)
+
+    def reset(self):
+        assert not self.open, (
+            "SMTP connection not quit")
+        self.__class__.existing = None
+
+class AppError(Exception):
+ pass
+
+class TestApp(object):
+
+ # for py.test
+ disabled = True
+
+ def __init__(self, app, namespace=None, relative_to=None,
+ extra_environ=None, pre_request_hook=None,
+ post_request_hook=None):
+ """
+ Wraps a WSGI application in a more convenient interface for
+ testing.
+
+ ``app`` may be an application, or a Paste Deploy app
+ URI, like ``'config:filename.ini#test'``.
+
+ ``namespace`` is a dictionary that will be written to (if
+ provided). This can be used with doctest or some other
+ system, and the variable ``res`` will be assigned everytime
+ you make a request (instead of returning the request).
+
+ ``relative_to`` is a directory, and filenames used for file
+ uploads are calculated relative to this. Also ``config:``
+ URIs that aren't absolute.
+
+ ``extra_environ`` is a dictionary of values that should go
+ into the environment for each request. These can provide a
+ communication channel with the application.
+
+ ``pre_request_hook`` is a function to be called prior to
+ making requests (such as ``post`` or ``get``). This function
+ must take one argument (the instance of the TestApp).
+
+ ``post_request_hook`` is a function, similar to
+ ``pre_request_hook``, to be called after requests are made.
+ """
+ if isinstance(app, (str, unicode)):
+ from paste.deploy import loadapp
+ # @@: Should pick up relative_to from calling module's
+ # __file__
+ app = loadapp(app, relative_to=relative_to)
+ self.app = app
+ self.namespace = namespace
+ self.relative_to = relative_to
+ if extra_environ is None:
+ extra_environ = {}
+ self.extra_environ = extra_environ
+ self.pre_request_hook = pre_request_hook
+ self.post_request_hook = post_request_hook
+ self.reset()
+
+ def reset(self):
+ """
+ Resets the state of the application; currently just clears
+ saved cookies.
+ """
+ self.cookies = {}
+
+    def _make_environ(self):
+        # Fresh copy per request so mutations don't leak back into
+        # self.extra_environ; paste.throw_errors makes middleware
+        # re-raise exceptions instead of converting them to 500s.
+        environ = self.extra_environ.copy()
+        environ['paste.throw_errors'] = True
+        return environ
+
+ def get(self, url, params=None, headers=None, extra_environ=None,
+ status=None, expect_errors=False):
+ """
+ Get the given url (well, actually a path like
+ ``'/page.html'``).
+
+ ``params``:
+ A query string, or a dictionary that will be encoded
+ into a query string. You may also include a query
+ string on the ``url``.
+
+ ``headers``:
+ A dictionary of extra headers to send.
+
+ ``extra_environ``:
+ A dictionary of environmental variables that should
+ be added to the request.
+
+ ``status``:
+ The integer status code you expect (if not 200 or 3xx).
+ If you expect a 404 response, for instance, you must give
+ ``status=404`` or it will be an error. You can also give
+ a wildcard, like ``'3*'`` or ``'*'``.
+
+ ``expect_errors``:
+ If this is not true, then if anything is written to
+ ``wsgi.errors`` it will be an error. If it is true, then
+ non-200/3xx responses are also okay.
+
+ Returns a `response object
+ <class-paste.fixture.TestResponse.html>`_
+ """
+ if extra_environ is None:
+ extra_environ = {}
+ # Hide from py.test:
+ __tracebackhide__ = True
+ if params:
+ if not isinstance(params, (str, unicode)):
+ params = urllib.urlencode(params, doseq=True)
+ if '?' in url:
+ url += '&'
+ else:
+ url += '?'
+ url += params
+ environ = self._make_environ()
+ url = str(url)
+ if '?' in url:
+ url, environ['QUERY_STRING'] = url.split('?', 1)
+ else:
+ environ['QUERY_STRING'] = ''
+ self._set_headers(headers, environ)
+ environ.update(extra_environ)
+ req = TestRequest(url, environ, expect_errors)
+ return self.do_request(req, status=status)
+
+    def _gen_request(self, method, url, params='', headers=None, extra_environ=None,
+                     status=None, upload_files=None, expect_errors=False):
+        """
+        Do a generic request.
+
+        Builds a request body from ``params`` (and ``upload_files``,
+        which forces multipart encoding), fills in the WSGI environ,
+        and dispatches through ``do_request``.
+        """
+        if headers is None:
+            headers = {}
+        if extra_environ is None:
+            extra_environ = {}
+        environ = self._make_environ()
+        # @@: Should this be all non-strings?
+        if isinstance(params, (list, tuple, dict)):
+            params = urllib.urlencode(params)
+        if hasattr(params, 'items'):
+            # Some other multi-dict like format
+            params = urllib.urlencode(params.items())
+        if upload_files:
+            # Re-parse the encoded params so they can be merged with the
+            # files into a single multipart body.
+            params = cgi.parse_qsl(params, keep_blank_values=True)
+            content_type, params = self.encode_multipart(
+                params, upload_files)
+            environ['CONTENT_TYPE'] = content_type
+        elif params:
+            environ.setdefault('CONTENT_TYPE', 'application/x-www-form-urlencoded')
+        if '?' in url:
+            url, environ['QUERY_STRING'] = url.split('?', 1)
+        else:
+            environ['QUERY_STRING'] = ''
+        environ['CONTENT_LENGTH'] = str(len(params))
+        environ['REQUEST_METHOD'] = method
+        environ['wsgi.input'] = StringIO(params)
+        self._set_headers(headers, environ)
+        # extra_environ wins over anything computed above.
+        environ.update(extra_environ)
+        req = TestRequest(url, environ, expect_errors)
+        return self.do_request(req, status=status)
+
+ def post(self, url, params='', headers=None, extra_environ=None,
+ status=None, upload_files=None, expect_errors=False):
+ """
+ Do a POST request. Very like the ``.get()`` method.
+ ``params`` are put in the body of the request.
+
+ ``upload_files`` is for file uploads. It should be a list of
+ ``[(fieldname, filename, file_content)]``. You can also use
+ just ``[(fieldname, filename)]`` and the file content will be
+ read from disk.
+
+ Returns a `response object
+ <class-paste.fixture.TestResponse.html>`_
+ """
+ return self._gen_request('POST', url, params=params, headers=headers,
+ extra_environ=extra_environ,status=status,
+ upload_files=upload_files,
+ expect_errors=expect_errors)
+
+ def put(self, url, params='', headers=None, extra_environ=None,
+ status=None, upload_files=None, expect_errors=False):
+ """
+ Do a PUT request. Very like the ``.get()`` method.
+ ``params`` are put in the body of the request.
+
+ ``upload_files`` is for file uploads. It should be a list of
+ ``[(fieldname, filename, file_content)]``. You can also use
+ just ``[(fieldname, filename)]`` and the file content will be
+ read from disk.
+
+ Returns a `response object
+ <class-paste.fixture.TestResponse.html>`_
+ """
+ return self._gen_request('PUT', url, params=params, headers=headers,
+ extra_environ=extra_environ,status=status,
+ upload_files=upload_files,
+ expect_errors=expect_errors)
+
+ def delete(self, url, params='', headers=None, extra_environ=None,
+ status=None, expect_errors=False):
+ """
+ Do a DELETE request. Very like the ``.get()`` method.
+ ``params`` are put in the body of the request.
+
+ Returns a `response object
+ <class-paste.fixture.TestResponse.html>`_
+ """
+ return self._gen_request('DELETE', url, params=params, headers=headers,
+ extra_environ=extra_environ,status=status,
+ upload_files=None, expect_errors=expect_errors)
+
+
+
+
+ def _set_headers(self, headers, environ):
+ """
+ Turn any headers into environ variables
+ """
+ if not headers:
+ return
+ for header, value in headers.items():
+ if header.lower() == 'content-type':
+ var = 'CONTENT_TYPE'
+ elif header.lower() == 'content-length':
+ var = 'CONTENT_LENGTH'
+ else:
+ var = 'HTTP_%s' % header.replace('-', '_').upper()
+ environ[var] = value
+
+ def encode_multipart(self, params, files):
+ """
+ Encodes a set of parameters (typically a name/value list) and
+ a set of files (a list of (name, filename, file_body)) into a
+ typical POST body, returning the (content_type, body).
+ """
+ boundary = '----------a_BoUnDaRy%s$' % random.random()
+ lines = []
+ for key, value in params:
+ lines.append('--'+boundary)
+ lines.append('Content-Disposition: form-data; name="%s"' % key)
+ lines.append('')
+ lines.append(value)
+ for file_info in files:
+ key, filename, value = self._get_file_info(file_info)
+ lines.append('--'+boundary)
+ lines.append('Content-Disposition: form-data; name="%s"; filename="%s"'
+ % (key, filename))
+ fcontent = mimetypes.guess_type(filename)[0]
+ lines.append('Content-Type: %s' %
+ fcontent or 'application/octet-stream')
+ lines.append('')
+ lines.append(value)
+ lines.append('--' + boundary + '--')
+ lines.append('')
+ body = '\r\n'.join(lines)
+ content_type = 'multipart/form-data; boundary=%s' % boundary
+ return content_type, body
+
+    def _get_file_info(self, file_info):
+        """
+        Normalize an upload spec to ``(fieldname, filename, content)``.
+
+        A 2-tuple ``(fieldname, filename)`` has its content read from
+        disk (relative to ``self.relative_to`` when set); a 3-tuple is
+        returned as-is.  Anything else raises ``ValueError``.
+        """
+        if len(file_info) == 2:
+            # It only has a filename
+            filename = file_info[1]
+            if self.relative_to:
+                filename = os.path.join(self.relative_to, filename)
+            f = open(filename, 'rb')
+            content = f.read()
+            f.close()
+            return (file_info[0], filename, content)
+        elif len(file_info) == 3:
+            return file_info
+        else:
+            raise ValueError(
+                "upload_files need to be a list of tuples of (fieldname, "
+                "filename, filecontent) or (fieldname, filename); "
+                "you gave: %r"
+                % repr(file_info)[:100])
+
+    def do_request(self, req, status):
+        """
+        Executes the given request (``req``), with the expected
+        ``status``.  Generally ``.get()`` and ``.post()`` are used
+        instead.
+
+        Runs the app through the lint middleware with stdout captured,
+        builds a TestResponse, copies ``paste.testing_variables`` onto
+        it, checks status/errors, and records any cookies that were set.
+        """
+        if self.pre_request_hook:
+            self.pre_request_hook(self)
+        __tracebackhide__ = True
+        if self.cookies:
+            # Serialize saved cookies into a single Cookie: header.
+            c = BaseCookie()
+            for name, value in self.cookies.items():
+                c[name] = value
+            hc = '; '.join(['='.join([m.key, m.value]) for m in c.values()])
+            req.environ['HTTP_COOKIE'] = hc
+        req.environ['paste.testing'] = True
+        req.environ['paste.testing_variables'] = {}
+        app = lint.middleware(self.app)
+        old_stdout = sys.stdout
+        out = CaptureStdout(old_stdout)
+        try:
+            # Capture anything the app prints; re-emit it on stderr so
+            # it is visible without polluting the real stdout.
+            sys.stdout = out
+            start_time = time.time()
+            raise_on_wsgi_error = not req.expect_errors
+            raw_res = wsgilib.raw_interactive(
+                app, req.url,
+                raise_on_wsgi_error=raise_on_wsgi_error,
+                **req.environ)
+            end_time = time.time()
+        finally:
+            sys.stdout = old_stdout
+            sys.stderr.write(out.getvalue())
+        res = self._make_response(raw_res, end_time - start_time)
+        res.request = req
+        # Expose anything the app stashed in paste.testing_variables as
+        # attributes on the response, refusing to clobber existing ones.
+        for name, value in req.environ['paste.testing_variables'].items():
+            if hasattr(res, name):
+                raise ValueError(
+                    "paste.testing_variables contains the variable %r, but "
+                    "the response object already has an attribute by that "
+                    "name" % name)
+            setattr(res, name, value)
+        if self.namespace is not None:
+            self.namespace['res'] = res
+        if not req.expect_errors:
+            self._check_status(status, res)
+            self._check_errors(res)
+        # Remember cookies for subsequent requests on this TestApp.
+        res.cookies_set = {}
+        for header in res.all_headers('set-cookie'):
+            c = BaseCookie(header)
+            for key, morsel in c.items():
+                self.cookies[key] = morsel.value
+                res.cookies_set[key] = morsel.value
+        if self.post_request_hook:
+            self.post_request_hook(self)
+        if self.namespace is None:
+            # It's annoying to return the response in doctests, as it'll
+            # be printed, so we only return it is we couldn't assign
+            # it anywhere
+            return res
+
+    def _check_status(self, status, res):
+        """
+        Raise AppError unless ``res.status`` matches the expectation:
+        ``'*'`` accepts anything, a list/tuple accepts any member,
+        ``None`` accepts 2xx/3xx, and an int must match exactly.
+        """
+        __tracebackhide__ = True
+        if status == '*':
+            return
+        if isinstance(status, (list, tuple)):
+            if res.status not in status:
+                raise AppError(
+                    "Bad response: %s (not one of %s for %s)\n%s"
+                    % (res.full_status, ', '.join(map(str, status)),
+                       res.request.url, res.body))
+            return
+        if status is None:
+            # Default expectation: success or redirect.
+            if res.status >= 200 and res.status < 400:
+                return
+            raise AppError(
+                "Bad response: %s (not 200 OK or 3xx redirect for %s)\n%s"
+                % (res.full_status, res.request.url,
+                   res.body))
+        if status != res.status:
+            raise AppError(
+                "Bad response: %s (not %s)" % (res.full_status, status))
+
+ def _check_errors(self, res):
+ if res.errors:
+ raise AppError(
+ "Application had errors logged:\n%s" % res.errors)
+
+ def _make_response(self, (status, headers, body, errors), total_time):
+ return TestResponse(self, status, headers, body, errors,
+ total_time)
+
+class CaptureStdout(object):
+
+ def __init__(self, actual):
+ self.captured = StringIO()
+ self.actual = actual
+
+ def write(self, s):
+ self.captured.write(s)
+ self.actual.write(s)
+
+ def flush(self):
+ self.actual.flush()
+
+ def writelines(self, lines):
+ for item in lines:
+ self.write(item)
+
+ def getvalue(self):
+ return self.captured.getvalue()
+
+class TestResponse(object):
+
+ # for py.test
+ disabled = True
+
+ """
+ Instances of this class are return by `TestApp
+ <class-paste.fixture.TestApp.html>`_
+ """
+
+ def __init__(self, test_app, status, headers, body, errors,
+ total_time):
+ self.test_app = test_app
+ self.status = int(status.split()[0])
+ self.full_status = status
+ self.headers = headers
+ self.header_dict = HeaderDict.fromlist(self.headers)
+ self.body = body
+ self.errors = errors
+ self._normal_body = None
+ self.time = total_time
+ self._forms_indexed = None
+
+ def forms__get(self):
+ """
+ Returns a dictionary of ``Form`` objects. Indexes are both in
+ order (from zero) and by form id (if the form is given an id).
+ """
+ if self._forms_indexed is None:
+ self._parse_forms()
+ return self._forms_indexed
+
+ forms = property(forms__get,
+ doc="""
+ A list of <form>s found on the page (instances of
+ `Form <class-paste.fixture.Form.html>`_)
+ """)
+
+ def form__get(self):
+ forms = self.forms
+ if not forms:
+ raise TypeError(
+ "You used response.form, but no forms exist")
+ if 1 in forms:
+ # There is more than one form
+ raise TypeError(
+ "You used response.form, but more than one form exists")
+ return forms[0]
+
+ form = property(form__get,
+ doc="""
+ Returns a single `Form
+ <class-paste.fixture.Form.html>`_ instance; it
+ is an error if there are multiple forms on the
+ page.
+ """)
+
+ _tag_re = re.compile(r'<(/?)([:a-z0-9_\-]*)(.*?)>', re.S|re.I)
+
+ def _parse_forms(self):
+ forms = self._forms_indexed = {}
+ form_texts = []
+ started = None
+ for match in self._tag_re.finditer(self.body):
+ end = match.group(1) == '/'
+ tag = match.group(2).lower()
+ if tag != 'form':
+ continue
+ if end:
+ assert started, (
+ "</form> unexpected at %s" % match.start())
+ form_texts.append(self.body[started:match.end()])
+ started = None
+ else:
+ assert not started, (
+ "Nested form tags at %s" % match.start())
+ started = match.start()
+ assert not started, (
+ "Danging form: %r" % self.body[started:])
+ for i, text in enumerate(form_texts):
+ form = Form(self, text)
+ forms[i] = form
+ if form.id:
+ forms[form.id] = form
+
+ def header(self, name, default=NoDefault):
+ """
+ Returns the named header; an error if there is not exactly one
+ matching header (unless you give a default -- always an error
+ if there is more than one header)
+ """
+ found = None
+ for cur_name, value in self.headers:
+ if cur_name.lower() == name.lower():
+ assert not found, (
+ "Ambiguous header: %s matches %r and %r"
+ % (name, found, value))
+ found = value
+ if found is None:
+ if default is NoDefault:
+ raise KeyError(
+ "No header found: %r (from %s)"
+ % (name, ', '.join([n for n, v in self.headers])))
+ else:
+ return default
+ return found
+
+ def all_headers(self, name):
+ """
+ Gets all headers by the ``name``, returns as a list
+ """
+ found = []
+ for cur_name, value in self.headers:
+ if cur_name.lower() == name.lower():
+ found.append(value)
+ return found
+
+ def follow(self, **kw):
+ """
+ If this request is a redirect, follow that redirect. It
+ is an error if this is not a redirect response. Returns
+ another response object.
+ """
+ assert self.status >= 300 and self.status < 400, (
+ "You can only follow redirect responses (not %s)"
+ % self.full_status)
+ location = self.header('location')
+ type, rest = urllib.splittype(location)
+ host, path = urllib.splithost(rest)
+ # @@: We should test that it's not a remote redirect
+ return self.test_app.get(location, **kw)
+
+ def click(self, description=None, linkid=None, href=None,
+ anchor=None, index=None, verbose=False):
+ """
+ Click the link as described. Each of ``description``,
+ ``linkid``, and ``url`` are *patterns*, meaning that they are
+ either strings (regular expressions), compiled regular
+ expressions (objects with a ``search`` method), or callables
+ returning true or false.
+
+ All the given patterns are ANDed together:
+
+ * ``description`` is a pattern that matches the contents of the
+ anchor (HTML and all -- everything between ``<a...>`` and
+ ``</a>``)
+
+ * ``linkid`` is a pattern that matches the ``id`` attribute of
+ the anchor. It will receive the empty string if no id is
+ given.
+
+ * ``href`` is a pattern that matches the ``href`` of the anchor;
+ the literal content of that attribute, not the fully qualified
+ attribute.
+
+ * ``anchor`` is a pattern that matches the entire anchor, with
+ its contents.
+
+ If more than one link matches, then the ``index`` link is
+ followed. If ``index`` is not given and more than one link
+ matches, or if no link matches, then ``IndexError`` will be
+ raised.
+
+ If you give ``verbose`` then messages will be printed about
+ each link, and why it does or doesn't match. If you use
+ ``app.click(verbose=True)`` you'll see a list of all the
+ links.
+
+ You can use multiple criteria to essentially assert multiple
+ aspects about the link, e.g., where the link's destination is.
+ """
+ __tracebackhide__ = True
+ found_html, found_desc, found_attrs = self._find_element(
+ tag='a', href_attr='href',
+ href_extract=None,
+ content=description,
+ id=linkid,
+ href_pattern=href,
+ html_pattern=anchor,
+ index=index, verbose=verbose)
+ return self.goto(found_attrs['uri'])
+
+ def clickbutton(self, description=None, buttonid=None, href=None,
+ button=None, index=None, verbose=False):
+ """
+ Like ``.click()``, except looks for link-like buttons.
+ This kind of button should look like
+ ``<button onclick="...location.href='url'...">``.
+ """
+ __tracebackhide__ = True
+ found_html, found_desc, found_attrs = self._find_element(
+ tag='button', href_attr='onclick',
+ href_extract=re.compile(r"location\.href='(.*?)'"),
+ content=description,
+ id=buttonid,
+ href_pattern=href,
+ html_pattern=button,
+ index=index, verbose=verbose)
+ return self.goto(found_attrs['uri'])
+
+ def _find_element(self, tag, href_attr, href_extract,
+ content, id,
+ href_pattern,
+ html_pattern,
+ index, verbose):
+ content_pat = _make_pattern(content)
+ id_pat = _make_pattern(id)
+ href_pat = _make_pattern(href_pattern)
+ html_pat = _make_pattern(html_pattern)
+
+ _tag_re = re.compile(r'<%s\s+(.*?)>(.*?)</%s>' % (tag, tag),
+ re.I+re.S)
+
+ def printlog(s):
+ if verbose:
+ print s
+
+ found_links = []
+ total_links = 0
+ for match in _tag_re.finditer(self.body):
+ el_html = match.group(0)
+ el_attr = match.group(1)
+ el_content = match.group(2)
+ attrs = _parse_attrs(el_attr)
+ if verbose:
+ printlog('Element: %r' % el_html)
+ if not attrs.get(href_attr):
+ printlog(' Skipped: no %s attribute' % href_attr)
+ continue
+ el_href = attrs[href_attr]
+ if href_extract:
+ m = href_extract.search(el_href)
+ if not m:
+ printlog(" Skipped: doesn't match extract pattern")
+ continue
+ el_href = m.group(1)
+ attrs['uri'] = el_href
+ if el_href.startswith('#'):
+ printlog(' Skipped: only internal fragment href')
+ continue
+ if el_href.startswith('javascript:'):
+ printlog(' Skipped: cannot follow javascript:')
+ continue
+ total_links += 1
+ if content_pat and not content_pat(el_content):
+ printlog(" Skipped: doesn't match description")
+ continue
+ if id_pat and not id_pat(attrs.get('id', '')):
+ printlog(" Skipped: doesn't match id")
+ continue
+ if href_pat and not href_pat(el_href):
+ printlog(" Skipped: doesn't match href")
+ continue
+ if html_pat and not html_pat(el_html):
+ printlog(" Skipped: doesn't match html")
+ continue
+ printlog(" Accepted")
+ found_links.append((el_html, el_content, attrs))
+ if not found_links:
+ raise IndexError(
+ "No matching elements found (from %s possible)"
+ % total_links)
+ if index is None:
+ if len(found_links) > 1:
+ raise IndexError(
+ "Multiple links match: %s"
+ % ', '.join([repr(anc) for anc, d, attr in found_links]))
+ found_link = found_links[0]
+ else:
+ try:
+ found_link = found_links[index]
+ except IndexError:
+ raise IndexError(
+ "Only %s (out of %s) links match; index %s out of range"
+ % (len(found_links), total_links, index))
+ return found_link
+
+    def goto(self, href, method='get', **args):
+        """
+        Go to the (potentially relative) link ``href``, using the
+        given method (``'get'`` or ``'post'``) and any extra arguments
+        you want to pass to the ``app.get()`` or ``app.post()``
+        methods.
+
+        All hostnames and schemes will be ignored.
+        """
+        scheme, host, path, query, fragment = urlparse.urlsplit(href)
+        # We blank out scheme/host/fragment so only the path and query
+        # are kept; the link is then resolved against the current URL.
+        scheme = host = fragment = ''
+        href = urlparse.urlunsplit((scheme, host, path, query, fragment))
+        href = urlparse.urljoin(self.request.full_url, href)
+        method = method.lower()
+        assert method in ('get', 'post'), (
+            'Only "get" or "post" are allowed for method (you gave %r)'
+            % method)
+        if method == 'get':
+            method = self.test_app.get
+        else:
+            method = self.test_app.post
+        return method(href, **args)
+
+ _normal_body_regex = re.compile(r'[ \n\r\t]+')
+
+ def normal_body__get(self):
+ if self._normal_body is None:
+ self._normal_body = self._normal_body_regex.sub(
+ ' ', self.body)
+ return self._normal_body
+
+ normal_body = property(normal_body__get,
+ doc="""
+ Return the whitespace-normalized body
+ """)
+
+ def __contains__(self, s):
+ """
+ A response 'contains' a string if it is present in the body
+ of the response. Whitespace is normalized when searching
+ for a string.
+ """
+ if not isinstance(s, (str, unicode)):
+ s = str(s)
+ if isinstance(s, unicode):
+ ## FIXME: we don't know that this response uses utf8:
+ s = s.encode('utf8')
+ return (self.body.find(s) != -1
+ or self.normal_body.find(s) != -1)
+
+ def mustcontain(self, *strings, **kw):
+ """
+ Assert that the response contains all of the strings passed
+ in as arguments.
+
+ Equivalent to::
+
+ assert string in res
+ """
+ if 'no' in kw:
+ no = kw['no']
+ del kw['no']
+ if isinstance(no, basestring):
+ no = [no]
+ else:
+ no = []
+ if kw:
+ raise TypeError(
+ "The only keyword argument allowed is 'no'")
+ for s in strings:
+ if not s in self:
+ print >> sys.stderr, "Actual response (no %r):" % s
+ print >> sys.stderr, self
+ raise IndexError(
+ "Body does not contain string %r" % s)
+ for no_s in no:
+ if no_s in self:
+ print >> sys.stderr, "Actual response (has %r)" % no_s
+ print >> sys.stderr, self
+ raise IndexError(
+ "Body contains string %r" % s)
+
+ def __repr__(self):
+ return '<Response %s %r>' % (self.full_status, self.body[:20])
+
+ def __str__(self):
+ simple_body = '\n'.join([l for l in self.body.splitlines()
+ if l.strip()])
+ return 'Response: %s\n%s\n%s' % (
+ self.status,
+ '\n'.join(['%s: %s' % (n, v) for n, v in self.headers]),
+ simple_body)
+
+ def showbrowser(self):
+ """
+ Show this response in a browser window (for debugging purposes,
+ when it's hard to read the HTML).
+ """
+ import webbrowser
+ fn = tempnam_no_warning(None, 'paste-fixture') + '.html'
+ f = open(fn, 'wb')
+ f.write(self.body)
+ f.close()
+ url = 'file:' + fn.replace(os.sep, '/')
+ webbrowser.open_new(url)
+
+class TestRequest(object):
+
+ # for py.test
+ disabled = True
+
+ """
+ Instances of this class are created by `TestApp
+ <class-paste.fixture.TestApp.html>`_ with the ``.get()`` and
+ ``.post()`` methods, and are consumed there by ``.do_request()``.
+
+ Instances are also available as a ``.req`` attribute on
+ `TestResponse <class-paste.fixture.TestResponse.html>`_ instances.
+
+ Useful attributes:
+
+ ``url``:
+ The url (actually usually the path) of the request, without
+ query string.
+
+ ``environ``:
+ The environment dictionary used for the request.
+
+ ``full_url``:
+ The url/path, with query string.
+ """
+
+ def __init__(self, url, environ, expect_errors=False):
+ if url.startswith('http://localhost'):
+ url = url[len('http://localhost'):]
+ self.url = url
+ self.environ = environ
+ if environ.get('QUERY_STRING'):
+ self.full_url = url + '?' + environ['QUERY_STRING']
+ else:
+ self.full_url = url
+ self.expect_errors = expect_errors
+
+
+class Form(object):
+
+ """
+ This object represents a form that has been found in a page.
+ This has a couple useful attributes:
+
+ ``text``:
+ the full HTML of the form.
+
+ ``action``:
+ the relative URI of the action.
+
+ ``method``:
+ the method (e.g., ``'GET'``).
+
+ ``id``:
+ the id, or None if not given.
+
+ ``fields``:
+ a dictionary of fields, each value is a list of fields by
+ that name. ``<input type=\"radio\">`` and ``<select>`` are
+ both represented as single fields with multiple options.
+ """
+
+ # @@: This really should be using Mechanize/ClientForm or
+ # something...
+
+ _tag_re = re.compile(r'<(/?)([:a-z0-9_\-]*)([^>]*?)>', re.I)
+
+ def __init__(self, response, text):
+ self.response = response
+ self.text = text
+ self._parse_fields()
+ self._parse_action()
+
+ def _parse_fields(self):
+ in_select = None
+ in_textarea = None
+ fields = {}
+ for match in self._tag_re.finditer(self.text):
+ end = match.group(1) == '/'
+ tag = match.group(2).lower()
+ if tag not in ('input', 'select', 'option', 'textarea',
+ 'button'):
+ continue
+ if tag == 'select' and end:
+ assert in_select, (
+ '%r without starting select' % match.group(0))
+ in_select = None
+ continue
+ if tag == 'textarea' and end:
+ assert in_textarea, (
+ "</textarea> with no <textarea> at %s" % match.start())
+ in_textarea[0].value = html_unquote(self.text[in_textarea[1]:match.start()])
+ in_textarea = None
+ continue
+ if end:
+ continue
+ attrs = _parse_attrs(match.group(3))
+ if 'name' in attrs:
+ name = attrs.pop('name')
+ else:
+ name = None
+ if tag == 'option':
+ in_select.options.append((attrs.get('value'),
+ 'selected' in attrs))
+ continue
+ if tag == 'input' and attrs.get('type') == 'radio':
+ field = fields.get(name)
+ if not field:
+ field = Radio(self, tag, name, match.start(), **attrs)
+ fields.setdefault(name, []).append(field)
+ else:
+ field = field[0]
+ assert isinstance(field, Radio)
+ field.options.append((attrs.get('value'),
+ 'checked' in attrs))
+ continue
+ tag_type = tag
+ if tag == 'input':
+ tag_type = attrs.get('type', 'text').lower()
+ FieldClass = Field.classes.get(tag_type, Field)
+ field = FieldClass(self, tag, name, match.start(), **attrs)
+ if tag == 'textarea':
+ assert not in_textarea, (
+ "Nested textareas: %r and %r"
+ % (in_textarea, match.group(0)))
+ in_textarea = field, match.end()
+ elif tag == 'select':
+ assert not in_select, (
+ "Nested selects: %r and %r"
+ % (in_select, match.group(0)))
+ in_select = field
+ fields.setdefault(name, []).append(field)
+ self.fields = fields
+
+ def _parse_action(self):
+ self.action = None
+ for match in self._tag_re.finditer(self.text):
+ end = match.group(1) == '/'
+ tag = match.group(2).lower()
+ if tag != 'form':
+ continue
+ if end:
+ break
+ attrs = _parse_attrs(match.group(3))
+ self.action = attrs.get('action', '')
+ self.method = attrs.get('method', 'GET')
+ self.id = attrs.get('id')
+ # @@: enctype?
+ else:
+ assert 0, "No </form> tag found"
+ assert self.action is not None, (
+ "No <form> tag found")
+
+ def __setitem__(self, name, value):
+ """
+ Set the value of the named field. If there is 0 or multiple
+ fields by that name, it is an error.
+
+ Setting the value of a ``<select>`` selects the given option
+ (and confirms it is an option). Setting radio fields does the
+ same. Checkboxes get boolean values. You cannot set hidden
+ fields or buttons.
+
+ Use ``.set()`` if there is any ambiguity and you must provide
+ an index.
+ """
+ fields = self.fields.get(name)
+ assert fields is not None, (
+ "No field by the name %r found (fields: %s)"
+ % (name, ', '.join(map(repr, self.fields.keys()))))
+ assert len(fields) == 1, (
+ "Multiple fields match %r: %s"
+ % (name, ', '.join(map(repr, fields))))
+ fields[0].value = value
+
+ def __getitem__(self, name):
+ """
+ Get the named field object (ambiguity is an error).
+ """
+ fields = self.fields.get(name)
+ assert fields is not None, (
+ "No field by the name %r found" % name)
+ assert len(fields) == 1, (
+ "Multiple fields match %r: %s"
+ % (name, ', '.join(map(repr, fields))))
+ return fields[0]
+
+ def set(self, name, value, index=None):
+ """
+ Set the given name, using ``index`` to disambiguate.
+ """
+ if index is None:
+ self[name] = value
+ else:
+ fields = self.fields.get(name)
+ assert fields is not None, (
+ "No fields found matching %r" % name)
+ field = fields[index]
+ field.value = value
+
+ def get(self, name, index=None, default=NoDefault):
+ """
+ Get the named/indexed field object, or ``default`` if no field
+ is found.
+ """
+ fields = self.fields.get(name)
+ if fields is None and default is not NoDefault:
+ return default
+ if index is None:
+ return self[name]
+ else:
+ fields = self.fields.get(name)
+ assert fields is not None, (
+ "No fields found matching %r" % name)
+ field = fields[index]
+ return field
+
+ def select(self, name, value, index=None):
+ """
+ Like ``.set()``, except also confirms the target is a
+ ``<select>``.
+ """
+ field = self.get(name, index=index)
+ assert isinstance(field, Select)
+ field.value = value
+
+ def submit(self, name=None, index=None, **args):
+ """
+ Submits the form. If ``name`` is given, then also select that
+ button (using ``index`` to disambiguate)``.
+
+ Any extra keyword arguments are passed to the ``.get()`` or
+ ``.post()`` method.
+
+ Returns a response object.
+ """
+ fields = self.submit_fields(name, index=index)
+ return self.response.goto(self.action, method=self.method,
+ params=fields, **args)
+
+ def submit_fields(self, name=None, index=None):
+ """
+ Return a list of ``[(name, value), ...]`` for the current
+ state of the form.
+ """
+ submit = []
+ if name is not None:
+ field = self.get(name, index=index)
+ submit.append((field.name, field.value_if_submitted()))
+ for name, fields in self.fields.items():
+ if name is None:
+ continue
+ for field in fields:
+ value = field.value
+ if value is None:
+ continue
+ submit.append((name, value))
+ return submit
+
+
+_attr_re = re.compile(r'([^= \n\r\t]+)[ \n\r\t]*(?:=[ \n\r\t]*(?:"([^"]*)"|([^"][^ \n\r\t>]*)))?', re.S)
+
+def _parse_attrs(text):
+ attrs = {}
+ for match in _attr_re.finditer(text):
+ attr_name = match.group(1).lower()
+ attr_body = match.group(2) or match.group(3)
+ attr_body = html_unquote(attr_body or '')
+ attrs[attr_name] = attr_body
+ return attrs
+
class Field(object):

    """
    Represents a single form field.
    """

    # Registry mapping an input type name (select, radio, ...) to the
    # Field subclass that models it; populated by the subclasses below.
    classes = {}

    # Whether test code may assign to ``.value``; subclasses such as
    # Submit set this to False.
    settable = True

    def __init__(self, form, tag, name, pos,
                 value=None, id=None, **attrs):
        self.form = form
        self.tag = tag
        self.name = name
        self.pos = pos
        self._value = value
        self.id = id
        self.attrs = attrs

    def value__get(self):
        return self._value

    def value__set(self, value):
        if not self.settable:
            raise AttributeError(
                "You cannot set the value of the <%s> field %r"
                % (self.tag, self.name))
        self._value = value

    def force_value(self, value):
        """
        Assign a value unconditionally, bypassing the ``settable``
        check (useful for, say, hidden fields).
        """
        self._value = value

    value = property(value__get, value__set)
+
class Select(Field):

    """
    Field representing ``<select>``
    """

    def __init__(self, *args, **attrs):
        super(Select, self).__init__(*args, **attrs)
        self.options = []
        # only single-selection lists are modelled here
        self.multiple = attrs.get('multiple')
        assert not self.multiple, (
            "<select multiple> not yet supported")
        # Undetermined until a value is assigned or options are parsed.
        self.selectedIndex = None

    def value__get(self):
        # an explicit selection wins over the markup's checked option
        if self.selectedIndex is not None:
            return self.options[self.selectedIndex][0]
        for option, checked in self.options:
            if checked:
                return option
        # nothing marked selected: browsers submit the first option
        if self.options:
            return self.options[0][0]
        return None

    def value__set(self, value):
        wanted = str(value)
        for pos, (option, checked) in enumerate(self.options):
            if option == wanted:
                self.selectedIndex = pos
                break
        else:
            raise ValueError(
                "Option %r not found (from %s)"
                % (value, ', '.join(
                    [repr(o) for o, c in self.options])))

    value = property(value__get, value__set)

Field.classes['select'] = Select
+
class Radio(Select):

    """
    Field representing ``<input type="radio">``

    Reuses the single-selection value logic implemented by ``Select``.
    """

Field.classes['radio'] = Radio
+
class Checkbox(Field):

    """
    Field representing ``<input type="checkbox">``
    """

    def __init__(self, *args, **attrs):
        super(Checkbox, self).__init__(*args, **attrs)
        # a bare ``checked`` attribute marks the box as initially on
        self.checked = 'checked' in attrs

    def value__get(self):
        if not self.checked:
            # unchecked boxes are omitted from the submission entirely
            return None
        # browsers send "on" when no explicit value attribute is given
        return self._value if self._value is not None else 'on'

    def value__set(self, value):
        self.checked = bool(value)

    value = property(value__get, value__set)

Field.classes['checkbox'] = Checkbox
+
class Text(Field):
    """
    Field representing ``<input type="text">``
    """
    def __init__(self, form, tag, name, pos,
                 value='', id=None, **attrs):
        # Unlike the Field base class, text inputs default to the
        # empty string rather than None.
        super(Text, self).__init__(form, tag, name, pos,
                                   value=value, id=id, **attrs)

Field.classes['text'] = Text
+
class Textarea(Text):
    """
    Field representing ``<textarea>``

    Behaves like a text input (value defaults to the empty string).
    """

Field.classes['textarea'] = Textarea
+
class Hidden(Text):
    """
    Field representing ``<input type="hidden">``

    Behaves like a text input; ``force_value`` can be used to change
    it despite it not being user-editable in a browser.
    """

Field.classes['hidden'] = Hidden
+
class Submit(Field):
    """
    Field representing ``<input type="submit">`` and ``<button>``
    """

    # Submit buttons cannot be assigned a value by test code.
    settable = False

    def value__get(self):
        # A button contributes no value unless it is the one used to
        # submit the form (see value_if_submitted).
        return None

    # read-only property: no setter
    value = property(value__get)

    def value_if_submitted(self):
        # the value the browser would send when this button is clicked
        return self._value

Field.classes['submit'] = Submit

Field.classes['button'] = Submit

Field.classes['image'] = Submit
+
+############################################################
+## Command-line testing
+############################################################
+
+
class TestFileEnvironment(object):

    """
    This represents an environment in which files will be written, and
    scripts will be run.
    """

    # for py.test: prevents this class being collected as a test case
    disabled = True

    def __init__(self, base_path, template_path=None,
                 script_path=None,
                 environ=None, cwd=None, start_clear=True,
                 ignore_paths=None, ignore_hidden=True):
        """
        Creates an environment.  ``base_path`` is used as the current
        working directory, and generally where changes are looked for.

        ``template_path`` is the directory to look for *template*
        files, which are files you'll explicitly add to the
        environment.  This is done with ``.writefile()``.

        ``script_path`` is the PATH for finding executables.  Usually
        grabbed from ``$PATH``.

        ``environ`` is the operating system environment,
        ``os.environ`` if not given.

        ``cwd`` is the working directory, ``base_path`` by default.

        If ``start_clear`` is true (default) then the ``base_path``
        will be cleared (all files deleted) when an instance is
        created.  You can also use ``.clear()`` to clear the files.

        ``ignore_paths`` is a set of specific filenames that should be
        ignored when created in the environment.  ``ignore_hidden``
        means, if true (default) that filenames and directories
        starting with ``'.'`` will be ignored.
        """
        self.base_path = base_path
        self.template_path = template_path
        if environ is None:
            # copy so mutations do not leak into the real process env
            environ = os.environ.copy()
        self.environ = environ
        if script_path is None:
            # the PATH separator differs by platform (';' on Windows)
            if sys.platform == 'win32':
                script_path = environ.get('PATH', '').split(';')
            else:
                script_path = environ.get('PATH', '').split(':')
        self.script_path = script_path
        if cwd is None:
            cwd = base_path
        self.cwd = cwd
        if start_clear:
            self.clear()
        elif not os.path.exists(base_path):
            os.makedirs(base_path)
        self.ignore_paths = ignore_paths or []
        self.ignore_hidden = ignore_hidden

    def run(self, script, *args, **kw):
        """
        Run the command, with the given arguments.  The ``script``
        argument can have space-separated arguments, or you can use
        the positional arguments.

        Keywords allowed are:

        ``expect_error``: (default False)
            Don't raise an exception in case of errors
        ``expect_stderr``: (default ``expect_error``)
            Don't raise an exception if anything is printed to stderr
        ``stdin``: (default ``""``)
            Input to the script
        ``printresult``: (default True)
            Print the result after running
        ``cwd``: (default ``self.cwd``)
            The working directory to run in

        Returns a `ProcResponse
        <class-paste.fixture.ProcResponse.html>`_ object.
        """
        # hides this frame from py.test failure tracebacks
        __tracebackhide__ = True
        expect_error = _popget(kw, 'expect_error', False)
        expect_stderr = _popget(kw, 'expect_stderr', expect_error)
        cwd = _popget(kw, 'cwd', self.cwd)
        stdin = _popget(kw, 'stdin', None)
        printresult = _popget(kw, 'printresult', True)
        args = map(str, args)  # Python 2: map() returns a list here
        # any keywords left over were unrecognized
        assert not kw, (
            "Arguments not expected: %s" % ', '.join(kw.keys()))
        if ' ' in script:
            assert not args, (
                "You cannot give a multi-argument script (%r) "
                "and arguments (%s)" % (script, args))
            script, args = script.split(None, 1)
            args = shlex.split(args)
        script = self._find_exe(script)
        all = [script] + args
        # snapshot the tree before and after the run so ProcResult can
        # compute created/updated/deleted files
        files_before = self._find_files()
        proc = subprocess.Popen(all, stdin=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                cwd=cwd,
                                env=self.environ)
        stdout, stderr = proc.communicate(stdin)
        files_after = self._find_files()
        result = ProcResult(
            self, all, stdin, stdout, stderr,
            returncode=proc.returncode,
            files_before=files_before,
            files_after=files_after)
        if printresult:
            print result
            print '-'*40
        if not expect_error:
            result.assert_no_error()
        if not expect_stderr:
            result.assert_no_stderr()
        return result

    def _find_exe(self, script_name):
        # with no search path, resolve relative to the working directory
        if self.script_path is None:
            script_name = os.path.join(self.cwd, script_name)
            if not os.path.exists(script_name):
                raise OSError(
                    "Script %s does not exist" % script_name)
            return script_name
        # otherwise take the first match along the configured path
        for path in self.script_path:
            fn = os.path.join(path, script_name)
            if os.path.exists(fn):
                return fn
        raise OSError(
            "Script %s could not be found in %s"
            % (script_name, ':'.join(self.script_path)))

    def _find_files(self):
        # map of relative path -> FoundFile/FoundDir for everything
        # under base_path that is not ignored
        result = {}
        for fn in os.listdir(self.base_path):
            if self._ignore_file(fn):
                continue
            self._find_traverse(fn, result)
        return result

    def _ignore_file(self, fn):
        if fn in self.ignore_paths:
            return True
        if self.ignore_hidden and os.path.basename(fn).startswith('.'):
            return True
        return False

    def _find_traverse(self, path, result):
        # recursively record directories and files into ``result``
        full = os.path.join(self.base_path, path)
        if os.path.isdir(full):
            result[path] = FoundDir(self.base_path, path)
            for fn in os.listdir(full):
                fn = os.path.join(path, fn)
                if self._ignore_file(fn):
                    continue
                self._find_traverse(fn, result)
        else:
            result[path] = FoundFile(self.base_path, path)

    def clear(self):
        """
        Delete all the files in the base directory.
        """
        # remove the whole tree, then recreate an empty directory
        if os.path.exists(self.base_path):
            shutil.rmtree(self.base_path)
        os.mkdir(self.base_path)

    def writefile(self, path, content=None,
                  frompath=None):
        """
        Write a file to the given path.  If ``content`` is given then
        that text is written, otherwise the file in ``frompath`` is
        used.  ``frompath`` is relative to ``self.template_path``
        """
        full = os.path.join(self.base_path, path)
        if not os.path.exists(os.path.dirname(full)):
            os.makedirs(os.path.dirname(full))
        f = open(full, 'wb')
        # note: when both are given, content and the template file are
        # concatenated in that order
        if content is not None:
            f.write(content)
        if frompath is not None:
            if self.template_path:
                frompath = os.path.join(self.template_path, frompath)
            f2 = open(frompath, 'rb')
            f.write(f2.read())
            f2.close()
        f.close()
        return FoundFile(self.base_path, path)
+
class ProcResult(object):

    """
    Represents the results of running a command in
    `TestFileEnvironment
    <class-paste.fixture.TestFileEnvironment.html>`_.

    Attributes to pay particular attention to:

    ``stdout``, ``stderr``:
        What is produced

    ``files_created``, ``files_deleted``, ``files_updated``:
        Dictionaries mapping filenames (relative to the ``base_dir``)
        to `FoundFile <class-paste.fixture.FoundFile.html>`_ or
        `FoundDir <class-paste.fixture.FoundDir.html>`_ objects.
    """

    def __init__(self, test_env, args, stdin, stdout, stderr,
                 returncode, files_before, files_after):
        self.test_env = test_env
        self.args = args
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.returncode = returncode
        self.files_before = files_before
        self.files_after = files_after
        self.files_deleted = {}
        self.files_updated = {}
        # start from everything present afterwards; entries that also
        # existed before are removed below, leaving only new files
        self.files_created = files_after.copy()
        for path, f in files_before.items():
            if path not in files_after:
                self.files_deleted[path] = f
                continue
            del self.files_created[path]
            # a newer mtime after the run means the file was rewritten
            if f.mtime < files_after[path].mtime:
                self.files_updated[path] = files_after[path]

    def assert_no_error(self):
        # hides this frame from py.test failure tracebacks
        __tracebackhide__ = True
        assert self.returncode == 0, (
            "Script returned code: %s" % self.returncode)

    def assert_no_stderr(self):
        __tracebackhide__ = True
        if self.stderr:
            print 'Error output:'
            print self.stderr
            raise AssertionError("stderr output not expected")

    def __str__(self):
        # human-readable summary: status, streams, then file changes
        s = ['Script result: %s' % ' '.join(self.args)]
        if self.returncode:
            s.append(' return code: %s' % self.returncode)
        if self.stderr:
            s.append('-- stderr: --------------------')
            s.append(self.stderr)
        if self.stdout:
            s.append('-- stdout: --------------------')
            s.append(self.stdout)
        for name, files, show_size in [
            ('created', self.files_created, True),
            ('deleted', self.files_deleted, True),
            ('updated', self.files_updated, True)]:
            if files:
                s.append('-- %s: -------------------' % name)
                files = files.items()
                files.sort()
                last = ''
                for path, f in files:
                    # shared leading path components are blanked out
                    # for a tree-like listing
                    t = ' %s' % _space_prefix(last, path, indent=4,
                                              include_sep=False)
                    last = path
                    # FoundDir objects report size 'N/A' and are shown
                    # without a byte count
                    if show_size and f.size != 'N/A':
                        t += ' (%s bytes)' % f.size
                    s.append(t)
        return '\n'.join(s)
+
class FoundFile(object):

    """
    Represents a single file found as the result of a command.

    Has attributes:

    ``path``:
        The path of the file, relative to the ``base_path``

    ``full``:
        The full path

    ``stat``:
        The results of ``os.stat``.  Also ``mtime`` and ``size``
        contain the ``.st_mtime`` and ``st_size`` of the stat.

    ``bytes``:
        The contents of the file.

    You may use the ``in`` operator with these objects (tested against
    the contents of the file), and the ``.mustcontain()`` method.
    """

    # class-level flags so FoundFile and FoundDir can be distinguished
    # uniformly in listings
    file = True
    dir = False

    def __init__(self, base_path, path):
        self.base_path = base_path
        self.path = path
        self.full = os.path.join(base_path, path)
        # stat eagerly: the file may be deleted later, but the metadata
        # at discovery time is what the diffing logic compares
        self.stat = os.stat(self.full)
        self.mtime = self.stat.st_mtime
        self.size = self.stat.st_size
        self._bytes = None

    def bytes__get(self):
        # lazily read and cache the file contents on first access
        if self._bytes is None:
            f = open(self.full, 'rb')
            self._bytes = f.read()
            f.close()
        return self._bytes
    bytes = property(bytes__get)

    def __contains__(self, s):
        return s in self.bytes

    def mustcontain(self, s):
        # hides this frame from py.test failure tracebacks
        __tracebackhide__ = True
        bytes = self.bytes
        if s not in bytes:
            # print the file contents for context before failing
            print 'Could not find %r in:' % s
            print bytes
        assert s in bytes

    def __repr__(self):
        return '<%s %s:%s>' % (
            self.__class__.__name__,
            self.base_path, self.path)
+
class FoundDir(object):

    """
    Represents a directory created by a command.
    """

    # class-level flags mirroring FoundFile, so both kinds can be
    # distinguished uniformly in listings
    file = False
    dir = True

    def __init__(self, base_path, path):
        self.base_path = base_path
        self.path = path
        self.full = os.path.join(base_path, path)
        # placeholder metadata so directories can sit alongside
        # FoundFile objects in result listings
        self.size = 'N/A'
        self.mtime = 'N/A'

    def __repr__(self):
        return '<{0} {1}:{2}>'.format(
            self.__class__.__name__, self.base_path, self.path)
+
+def _popget(d, key, default=None):
+ """
+ Pop the key if found (else return default)
+ """
+ if key in d:
+ return d.pop(key)
+ return default
+
+def _space_prefix(pref, full, sep=None, indent=None, include_sep=True):
+ """
+ Anything shared by pref and full will be replaced with spaces
+ in full, and full returned.
+ """
+ if sep is None:
+ sep = os.path.sep
+ pref = pref.split(sep)
+ full = full.split(sep)
+ padding = []
+ while pref and full and pref[0] == full[0]:
+ if indent is None:
+ padding.append(' ' * (len(full[0]) + len(sep)))
+ else:
+ padding.append(' ' * indent)
+ full.pop(0)
+ pref.pop(0)
+ if padding:
+ if include_sep:
+ return ''.join(padding) + sep + sep.join(full)
+ else:
+ return ''.join(padding) + sep.join(full)
+ else:
+ return sep.join(full)
+
+def _make_pattern(pat):
+ if pat is None:
+ return None
+ if isinstance(pat, (str, unicode)):
+ pat = re.compile(pat)
+ if hasattr(pat, 'search'):
+ return pat.search
+ if callable(pat):
+ return pat
+ assert 0, (
+ "Cannot make callable pattern object out of %r" % pat)
+
def setup_module(module=None):
    """
    This is used by py.test if it is in the module, so you can
    import this directly.

    Use like::

        from paste.fixture import setup_module
    """
    # Deprecated June 2008
    import warnings
    warnings.warn(
        'setup_module is deprecated',
        DeprecationWarning, 2)
    if module is None:
        # The module we were called from must be the module...
        # (inspect the caller's frame for its __name__)
        module = sys._getframe().f_back.f_globals['__name__']
    if isinstance(module, (str, unicode)):
        # a module name was given (or derived above); resolve it to
        # the actual module object
        module = sys.modules[module]
    if hasattr(module, 'reset_state'):
        module.reset_state()
+
def html_unquote(v):
    """
    Unquote (some) entities in HTML. (incomplete)
    """
    # &amp; is deliberately handled last so that it does not create
    # new entity-like sequences for the earlier replacements
    replacements = [
        ('&nbsp;', ' '),
        ('&gt;', '>'),
        ('&lt;', '<'),
        ('&quot;', '"'),
        ('&amp;', '&'),
    ]
    for entity, char in replacements:
        v = v.replace(entity, char)
    return v
diff --git a/paste/flup_session.py b/paste/flup_session.py
new file mode 100644
index 0000000..b230ab8
--- /dev/null
+++ b/paste/flup_session.py
@@ -0,0 +1,108 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Creates a session object.
+
+In your application, use::
+
+ environ['paste.flup_session_service'].session
+
+This will return a dictionary. The contents of this dictionary will
+be saved to disk when the request is completed. The session will be
+created when you first fetch the session dictionary, and a cookie will
+be sent in that case. There's currently no way to use sessions without
+cookies, and there's no way to delete a session except to clear its
+data.
+"""
+
+from paste import httpexceptions
+from paste import wsgilib
+import flup.middleware.session
+flup_session = flup.middleware.session
+
# This is a dictionary of existing stores, keyed by a tuple of
# store type and parameters
# NOTE(review): appears unused anywhere in this module -- possibly a
# leftover or kept for external importers; confirm before removing.
store_cache = {}
+
class NoDefault(object):
    """Sentinel class used to distinguish "argument not given" from None."""
    pass
+
class SessionMiddleware(object):
    """
    WSGI middleware that attaches a flup session service to each
    request under ``environ['paste.flup_session_service']`` and adds
    the session cookie to the response.
    """

    # Maps a session_type name to (store class, arg specs), where each
    # arg spec is (config_name, constructor_kwarg, coercer, default)
    # used to build the store's keyword arguments from **store_config.
    session_classes = {
        'memory': (flup_session.MemorySessionStore,
                   [('session_timeout', 'timeout', int, 60)]),
        'disk': (flup_session.DiskSessionStore,
                 [('session_timeout', 'timeout', int, 60),
                  ('session_dir', 'storeDir', str, '/tmp/sessions')]),
        'shelve': (flup_session.ShelveSessionStore,
                   [('session_timeout', 'timeout', int, 60),
                    ('session_file', 'storeFile', str,
                     '/tmp/session.shelve')]),
        }


    def __init__(self, app,
                 global_conf=None,
                 session_type=NoDefault,
                 cookie_name=NoDefault,
                 **store_config
                 ):
        self.application = app
        if session_type is NoDefault:
            # NOTE(review): assumes global_conf is supplied whenever
            # session_type is not -- confirm callers always pass it
            session_type = global_conf.get('session_type', 'disk')
        self.session_type = session_type
        try:
            self.store_class, self.store_args = self.session_classes[self.session_type]
        except KeyError:
            raise KeyError(
                "The session_type %s is unknown (I know about %s)"
                % (self.session_type,
                   ', '.join(self.session_classes.keys())))
        kw = {}
        # coerce each configured value (or its default) to the type the
        # store constructor expects
        for config_name, kw_name, coercer, default in self.store_args:
            value = coercer(store_config.get(config_name, default))
            kw[kw_name] = value
        self.store = self.store_class(**kw)
        if cookie_name is NoDefault:
            cookie_name = global_conf.get('session_cookie', '_SID_')
        self.cookie_name = cookie_name

    def __call__(self, environ, start_response):
        service = flup_session.SessionService(
            self.store, environ, cookieName=self.cookie_name,
            fieldName=self.cookie_name)
        environ['paste.flup_session_service'] = service

        def cookie_start_response(status, headers, exc_info=None):
            # inject the session cookie into every normal response
            service.addCookie(headers)
            return start_response(status, headers, exc_info)

        try:
            app_iter = self.application(environ, cookie_start_response)
        except httpexceptions.HTTPException, e:
            # HTTP exceptions carry their own headers; make sure the
            # session cookie survives on that path too
            headers = (e.headers or {}).items()
            service.addCookie(headers)
            e.headers = dict(headers)
            service.close()
            raise
        except:
            # close (persist) the session even on unexpected errors
            service.close()
            raise

        # persist the session once the response iterator is exhausted
        return wsgilib.add_close(app_iter, service.close)
+
def make_session_middleware(app, global_conf,
                            session_type=NoDefault,
                            cookie_name=NoDefault,
                            **store_config):
    """
    Wraps the application in a session-managing middleware.
    The session service can then be found in
    ``environ['paste.flup_session_service']``

    ``session_type`` selects the flup store backend ('memory', 'disk'
    or 'shelve'); remaining keyword arguments configure that store.
    """
    return SessionMiddleware(
        app, global_conf=global_conf,
        session_type=session_type, cookie_name=cookie_name,
        **store_config)
diff --git a/paste/gzipper.py b/paste/gzipper.py
new file mode 100644
index 0000000..1431490
--- /dev/null
+++ b/paste/gzipper.py
@@ -0,0 +1,111 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+WSGI middleware
+
+Gzip-encodes the response.
+"""
+
+import gzip
+from paste.response import header_value, remove_header
+from paste.httpheaders import CONTENT_LENGTH
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
class GzipOutput(object):
    # NOTE(review): not referenced anywhere in this module -- possibly
    # a leftover placeholder; confirm before removing.
    pass
+
class middleware(object):
    """
    WSGI middleware that gzip-encodes the response body when the
    client's ``Accept-Encoding`` header advertises gzip support.
    """

    def __init__(self, application, compress_level=6):
        self.application = application
        self.compress_level = int(compress_level)

    def __call__(self, environ, start_response):
        accepts_gzip = 'gzip' in environ.get('HTTP_ACCEPT_ENCODING', '')
        if not accepts_gzip:
            # client cannot handle gzip, so act as a pure pass-through
            return self.application(environ, start_response)
        response = GzipResponse(start_response, self.compress_level)
        app_iter = self.application(environ,
                                    response.gzip_start_response)
        if app_iter is not None:
            response.finish_response(app_iter)

        return response.write()
+
class GzipResponse(object):
    """
    Buffers a single WSGI response and, when the content type allows
    it, gzip-compresses the body before it is sent.

    ``gzip_start_response`` is handed to the wrapped application in
    place of the real ``start_response``; the real one is only invoked
    from ``finish_response`` once the final Content-Length is known.
    """

    def __init__(self, start_response, compress_level):
        self.start_response = start_response
        self.compress_level = compress_level
        self.buffer = StringIO()
        self.compressible = False
        self.content_length = None

    def gzip_start_response(self, status, headers, exc_info=None):
        # Compress only text/* and application/* bodies that are not
        # already a zip-like format or content-encoded by the app.
        # (Removed a redundant ``self.headers = headers`` dead store
        # here; the attribute is assigned below after mutation.)
        ct = header_value(headers,'content-type')
        ce = header_value(headers,'content-encoding')
        self.compressible = False
        if ct and (ct.startswith('text/') or ct.startswith('application/')) \
            and 'zip' not in ct:
            self.compressible = True
        if ce:
            self.compressible = False
        if self.compressible:
            headers.append(('content-encoding', 'gzip'))
            # the compressed length is not known yet; finish_response()
            # sets the real value via CONTENT_LENGTH.update
            remove_header(headers, 'content-length')
        self.headers = headers
        self.status = status
        # the application writes its body into our buffer
        return self.buffer.write

    def write(self):
        # return the buffered (possibly compressed) body as a one-item
        # WSGI iterable and release the buffer
        out = self.buffer
        out.seek(0)
        s = out.getvalue()
        out.close()
        return [s]

    def finish_response(self, app_iter):
        if self.compressible:
            output = gzip.GzipFile(mode='wb', compresslevel=self.compress_level,
                fileobj=self.buffer)
        else:
            output = self.buffer
        try:
            for s in app_iter:
                output.write(s)
            if self.compressible:
                # flush gzip trailer into the underlying buffer
                output.close()
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
        content_length = self.buffer.tell()
        CONTENT_LENGTH.update(self.headers, content_length)
        # now that headers (incl. content-length) are final, start the
        # real response
        self.start_response(self.status, self.headers)
+
def filter_factory(application, **conf):
    """
    Deprecated Paste filter factory; use ``make_gzip_middleware``.
    """
    import warnings
    warnings.warn(
        'This function is deprecated; use make_gzip_middleware instead',
        DeprecationWarning, 2)
    # NOTE(review): ``conf`` (e.g. a compress_level setting) is ignored
    # and the outer ``application`` argument is shadowed by the inner
    # one -- preserved as-is since this function is deprecated.
    def filter(application):
        return middleware(application)
    return filter
+
def make_gzip_middleware(app, global_conf, compress_level=6):
    """
    Wrap the middleware, so that it applies gzipping to a response
    when it is supported by the browser and the content is of
    type ``text/*`` or ``application/*``
    """
    # compress_level may arrive as a string from config files
    return middleware(app, compress_level=int(compress_level))
diff --git a/paste/httpexceptions.py b/paste/httpexceptions.py
new file mode 100644
index 0000000..85e7c84
--- /dev/null
+++ b/paste/httpexceptions.py
@@ -0,0 +1,666 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Ian Bicking, Clark C. Evans and contributors
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# Some of this code was funded by http://prometheusresearch.com
+"""
+HTTP Exception Middleware
+
+This module processes Python exceptions that relate to HTTP exceptions
+by defining a set of exceptions, all subclasses of HTTPException, and a
+request handler (`middleware`) that catches these exceptions and turns
+them into proper responses.
+
+This module defines exceptions according to RFC 2068 [1]_ : codes with
+100-300 are not really errors; 400's are client errors, and 500's are
+server errors. According to the WSGI specification [2]_ , the application
+can call ``start_response`` more than once only under two conditions:
+(a) the response has not yet been sent, or (b) if the second and
+subsequent invocations of ``start_response`` have a valid ``exc_info``
+argument obtained from ``sys.exc_info()``. The WSGI specification then
+requires the server or gateway to handle the case where content has been
+sent and then an exception was encountered.
+
+Exceptions in the 5xx range and those raised after ``start_response``
+has been called are treated as serious errors and the ``exc_info`` is
+filled-in with information needed for a lower level module to generate a
+stack trace and log information.
+
+Exception
+ HTTPException
+ HTTPRedirection
+ * 300 - HTTPMultipleChoices
+ * 301 - HTTPMovedPermanently
+ * 302 - HTTPFound
+ * 303 - HTTPSeeOther
+ * 304 - HTTPNotModified
+ * 305 - HTTPUseProxy
+ * 306 - Unused (not implemented, obviously)
+ * 307 - HTTPTemporaryRedirect
+ HTTPError
+ HTTPClientError
+ * 400 - HTTPBadRequest
+ * 401 - HTTPUnauthorized
+ * 402 - HTTPPaymentRequired
+ * 403 - HTTPForbidden
+ * 404 - HTTPNotFound
+ * 405 - HTTPMethodNotAllowed
+ * 406 - HTTPNotAcceptable
+ * 407 - HTTPProxyAuthenticationRequired
+ * 408 - HTTPRequestTimeout
+        * 409 - HTTPConflict
+ * 410 - HTTPGone
+ * 411 - HTTPLengthRequired
+ * 412 - HTTPPreconditionFailed
+ * 413 - HTTPRequestEntityTooLarge
+ * 414 - HTTPRequestURITooLong
+ * 415 - HTTPUnsupportedMediaType
+ * 416 - HTTPRequestRangeNotSatisfiable
+ * 417 - HTTPExpectationFailed
+ * 429 - HTTPTooManyRequests
+ HTTPServerError
+ * 500 - HTTPInternalServerError
+ * 501 - HTTPNotImplemented
+ * 502 - HTTPBadGateway
+ * 503 - HTTPServiceUnavailable
+ * 504 - HTTPGatewayTimeout
+ * 505 - HTTPVersionNotSupported
+
+References:
+
+.. [1] http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5
+.. [2] http://www.python.org/peps/pep-0333.html#error-handling
+
+"""
+
+import types
+from paste.wsgilib import catch_errors_app
+from paste.response import has_header, header_value, replace_header
+from paste.request import resolve_relative_url
+from paste.util.quoting import strip_html, html_quote, no_quote, comment_quote
+
+SERVER_NAME = 'WSGI Server'
+TEMPLATE = """\
+<html>\r
+ <head><title>%(title)s</title></head>\r
+ <body>\r
+ <h1>%(title)s</h1>\r
+ <p>%(body)s</p>\r
+ <hr noshade>\r
+ <div align="right">%(server)s</div>\r
+ </body>\r
+</html>\r
+"""
+
class HTTPException(Exception):
    """
    the HTTP exception base class

    This encapsulates an HTTP response that interrupts normal application
    flow; but one which is not necessarily an error condition. For
    example, codes in the 300's are exceptions in that they interrupt
    normal processing; however, they are not considered errors.

    This class is complicated by 4 factors:

      1. The content given to the exception may either be plain-text or
         as html-text.

      2. The template may want to have string-substitutions taken from
         the current ``environ`` or values from incoming headers. This
         is especially troublesome due to case sensitivity.

      3. The final output may either be text/plain or text/html
         mime-type as requested by the client application.

      4. Each exception has a default explanation, but those who
         raise exceptions may want to provide additional detail.

    Attributes:

       ``code``
           the HTTP status code for the exception

       ``title``
           remainder of the status line (stuff after the code)

       ``explanation``
           a plain-text explanation of the error message that is
           not subject to environment or header substitutions;
           it is accessible in the template via %(explanation)s

       ``detail``
           a plain-text message customization that is not subject
           to environment or header substitutions; accessible in
           the template via %(detail)s

       ``template``
           a content fragment (in HTML) used for environment and
           header substitution; the default template includes both
           the explanation and further detail provided in the
           message

       ``required_headers``
           a sequence of headers which are required for proper
           construction of the exception

    Parameters:

       ``detail``
         a plain-text override of the default ``detail``

       ``headers``
         a list of (k,v) header pairs

       ``comment``
         a plain-text additional information which is
         usually stripped/hidden for end-users

    To override the template (which is HTML content) or the plain-text
    explanation, one must subclass the given exception; or customize it
    after it has been created.  This particular breakdown of a message
    into explanation, detail and template allows both the creation of
    plain-text and html messages for various clients as well as
    error-free substitution of environment variables and headers.
    """

    # subclasses must set ``code`` and ``title``; instantiating a class
    # without a code is rejected in __init__
    code = None
    title = None
    explanation = ''
    detail = ''
    comment = ''
    template = "%(explanation)s\r\n<br/>%(detail)s\r\n<!-- %(comment)s -->"
    required_headers = ()

    def __init__(self, detail=None, headers=None, comment=None):
        assert self.code, "Do not directly instantiate abstract exceptions."
        assert isinstance(headers, (type(None), list)), (
            "headers must be None or a list: %r"
            % headers)
        assert isinstance(detail, (type(None), basestring)), (
            "detail must be None or a string: %r" % detail)
        assert isinstance(comment, (type(None), basestring)), (
            "comment must be None or a string: %r" % comment)
        # note: may end up an empty tuple when no headers were given
        self.headers = headers or tuple()
        for req in self.required_headers:
            assert headers and has_header(headers, req), (
                "Exception %s must be passed the header %r "
                "(got headers: %r)"
                % (self.__class__.__name__, req, headers))
        if detail is not None:
            self.detail = detail
        if comment is not None:
            self.comment = comment
        # this message is what str(exception) will show
        Exception.__init__(self,"%s %s\n%s\n%s\n" % (
            self.code, self.title, self.explanation, self.detail))

    def make_body(self, environ, template, escfunc, comment_escfunc=None):
        # render ``template`` with escaped explanation/detail/comment;
        # a separate escape function may be used for the comment
        comment_escfunc = comment_escfunc or escfunc
        args = {'explanation': escfunc(self.explanation),
                'detail': escfunc(self.detail),
                'comment': comment_escfunc(self.comment)}
        # environ/header substitutions are only offered to subclasses
        # that customized ``template``; the default template uses none
        if HTTPException.template != self.template:
            for (k, v) in environ.items():
                args[k] = escfunc(v)
            if self.headers:
                for (k, v) in self.headers:
                    args[k.lower()] = escfunc(v)
        for key, value in args.items():
            if isinstance(value, unicode):
                args[key] = value.encode('utf8', 'xmlcharrefreplace')
        return template % args

    def plain(self, environ):
        """ text/plain representation of the exception """
        body = self.make_body(environ, strip_html(self.template), no_quote, comment_quote)
        return ('%s %s\r\n%s\r\n' % (self.code, self.title, body))

    def html(self, environ):
        """ text/html representation of the exception """
        body = self.make_body(environ, self.template, html_quote, comment_quote)
        return TEMPLATE % {
            'title': self.title,
            'code': self.code,
            'server': SERVER_NAME,
            'body': body }

    def prepare_content(self, environ):
        # returns (headers, content) for this exception's response
        if self.headers:
            headers = list(self.headers)
        else:
            headers = []
        # crude content negotiation: prefer HTML whenever the client
        # accepts html or anything at all
        if 'html' in environ.get('HTTP_ACCEPT','') or \
            '*/*' in environ.get('HTTP_ACCEPT',''):
            replace_header(headers, 'content-type', 'text/html')
            content = self.html(environ)
        else:
            replace_header(headers, 'content-type', 'text/plain')
            content = self.plain(environ)
        if isinstance(content, unicode):
            content = content.encode('utf8')
        cur_content_type = (
            header_value(headers, 'content-type')
            or 'text/html')
        # tack the charset onto whichever content-type was chosen
        replace_header(
            headers, 'content-type',
            cur_content_type + '; charset=utf8')
        return headers, content

    def response(self, environ):
        # local import; NOTE(review): presumably avoids a circular
        # import with paste.wsgiwrappers -- confirm
        from paste.wsgiwrappers import WSGIResponse
        headers, content = self.prepare_content(environ)
        resp = WSGIResponse(code=self.code, content=content)
        resp.headers = resp.headers.fromlist(headers)
        return resp

    def wsgi_application(self, environ, start_response, exc_info=None):
        """
        This exception as a WSGI application
        """
        headers, content = self.prepare_content(environ)
        start_response('%s %s' % (self.code, self.title),
                       headers,
                       exc_info)
        return [content]

    # an HTTPException instance is itself a callable WSGI application
    __call__ = wsgi_application

    def __repr__(self):
        return '<%s %s; code=%s>' % (self.__class__.__name__,
                                     self.title, self.code)
+
class HTTPError(HTTPException):
    """
    base class for status codes in the 400's and 500's

    This is an exception which indicates that an error has occurred,
    and that any work in progress should not be committed.  These
    typically correspond to response codes in the 400's and 500's.
    """
+
+#
+# 3xx Redirection
+#
+# This class of status code indicates that further action needs to be
+# taken by the user agent in order to fulfill the request. The action
+# required MAY be carried out by the user agent without interaction with
+# the user if and only if the method used in the second request is GET or
+# HEAD. A client SHOULD detect infinite redirection loops, since such
+# loops generate network traffic for each redirection.
+#
+
+class HTTPRedirection(HTTPException):
+ """
+ base class for 300's status code (redirections)
+
+ This is an abstract base class for 3xx redirection. It indicates
+ that further action needs to be taken by the user agent in order
+    to fulfill the request. It does not necessarily signal an error
+ condition.
+ """
+
+class _HTTPMove(HTTPRedirection):
+ """
+ redirections which require a Location field
+
+ Since a 'Location' header is a required attribute of 301, 302, 303,
+ 305 and 307 (but not 304), this base class provides the mechanics to
+ make this easy. While this has the same parameters as HTTPException,
+ if a location is not provided in the headers; it is assumed that the
+ detail _is_ the location (this for backward compatibility, otherwise
+ we'd add a new attribute).
+ """
+ required_headers = ('location',)
+ explanation = 'The resource has been moved to'
+ template = (
+ '%(explanation)s <a href="%(location)s">%(location)s</a>;\r\n'
+ 'you should be redirected automatically.\r\n'
+ '%(detail)s\r\n<!-- %(comment)s -->')
+
+ def __init__(self, detail=None, headers=None, comment=None):
+ assert isinstance(headers, (type(None), list))
+ headers = headers or []
+ location = header_value(headers,'location')
+ if not location:
+ location = detail
+ detail = ''
+ headers.append(('location', location))
+ assert location, ("HTTPRedirection specified neither a "
+ "location in the headers nor did it "
+ "provide a detail argument.")
+ HTTPRedirection.__init__(self, location, headers, comment)
+ if detail is not None:
+ self.detail = detail
+
+ def relative_redirect(cls, dest_uri, environ, detail=None, headers=None, comment=None):
+ """
+ Create a redirect object with the dest_uri, which may be relative,
+ considering it relative to the uri implied by the given environ.
+ """
+ location = resolve_relative_url(dest_uri, environ)
+ headers = headers or []
+ headers.append(('Location', location))
+ return cls(detail=detail, headers=headers, comment=comment)
+
+ relative_redirect = classmethod(relative_redirect)
+
+ def location(self):
+ for name, value in self.headers:
+ if name.lower() == 'location':
+ return value
+ else:
+ raise KeyError("No location set for %s" % self)
+
+class HTTPMultipleChoices(_HTTPMove):
+ code = 300
+ title = 'Multiple Choices'
+
+class HTTPMovedPermanently(_HTTPMove):
+ code = 301
+ title = 'Moved Permanently'
+
+class HTTPFound(_HTTPMove):
+ code = 302
+ title = 'Found'
+ explanation = 'The resource was found at'
+
+# This one is safe after a POST (the redirected location will be
+# retrieved with GET):
+class HTTPSeeOther(_HTTPMove):
+ code = 303
+ title = 'See Other'
+
+class HTTPNotModified(HTTPRedirection):
+ # @@: but not always (HTTP section 14.18.1)...?
+ # @@: Removed 'date' requirement, as its not required for an ETag
+ # @@: FIXME: This should require either an ETag or a date header
+ code = 304
+ title = 'Not Modified'
+ message = ''
+ # @@: should include date header, optionally other headers
+ # @@: should not return a content body
+ def plain(self, environ):
+ return ''
+ def html(self, environ):
+ """ text/html representation of the exception """
+ return ''
+
+class HTTPUseProxy(_HTTPMove):
+ # @@: OK, not a move, but looks a little like one
+ code = 305
+ title = 'Use Proxy'
+ explanation = (
+ 'The resource must be accessed through a proxy '
+ 'located at')
+
+class HTTPTemporaryRedirect(_HTTPMove):
+ code = 307
+ title = 'Temporary Redirect'
+
+#
+# 4xx Client Error
+#
+# The 4xx class of status code is intended for cases in which the client
+# seems to have erred. Except when responding to a HEAD request, the
+# server SHOULD include an entity containing an explanation of the error
+# situation, and whether it is a temporary or permanent condition. These
+# status codes are applicable to any request method. User agents SHOULD
+# display any included entity to the user.
+#
+
+class HTTPClientError(HTTPError):
+ """
+ base class for the 400's, where the client is in-error
+
+ This is an error condition in which the client is presumed to be
+ in-error. This is an expected problem, and thus is not considered
+ a bug. A server-side traceback is not warranted. Unless specialized,
+ this is a '400 Bad Request'
+ """
+ code = 400
+ title = 'Bad Request'
+ explanation = ('The server could not comply with the request since\r\n'
+ 'it is either malformed or otherwise incorrect.\r\n')
+
+class HTTPBadRequest(HTTPClientError):
+ pass
+
+class HTTPUnauthorized(HTTPClientError):
+ code = 401
+ title = 'Unauthorized'
+ explanation = (
+ 'This server could not verify that you are authorized to\r\n'
+ 'access the document you requested. Either you supplied the\r\n'
+ 'wrong credentials (e.g., bad password), or your browser\r\n'
+ 'does not understand how to supply the credentials required.\r\n')
+
+class HTTPPaymentRequired(HTTPClientError):
+ code = 402
+ title = 'Payment Required'
+ explanation = ('Access was denied for financial reasons.')
+
+class HTTPForbidden(HTTPClientError):
+ code = 403
+ title = 'Forbidden'
+ explanation = ('Access was denied to this resource.')
+
+class HTTPNotFound(HTTPClientError):
+ code = 404
+ title = 'Not Found'
+ explanation = ('The resource could not be found.')
+
+class HTTPMethodNotAllowed(HTTPClientError):
+ required_headers = ('allow',)
+ code = 405
+ title = 'Method Not Allowed'
+ # override template since we need an environment variable
+ template = ('The method %(REQUEST_METHOD)s is not allowed for '
+ 'this resource.\r\n%(detail)s')
+
+class HTTPNotAcceptable(HTTPClientError):
+ code = 406
+ title = 'Not Acceptable'
+ # override template since we need an environment variable
+ template = ('The resource could not be generated that was '
+ 'acceptable to your browser (content\r\nof type '
+ '%(HTTP_ACCEPT)s).\r\n%(detail)s')
+
+class HTTPProxyAuthenticationRequired(HTTPClientError):
+ code = 407
+ title = 'Proxy Authentication Required'
+ explanation = ('Authentication /w a local proxy is needed.')
+
+class HTTPRequestTimeout(HTTPClientError):
+ code = 408
+ title = 'Request Timeout'
+ explanation = ('The server has waited too long for the request to '
+ 'be sent by the client.')
+
+class HTTPConflict(HTTPClientError):
+ code = 409
+ title = 'Conflict'
+ explanation = ('There was a conflict when trying to complete '
+ 'your request.')
+
+class HTTPGone(HTTPClientError):
+ code = 410
+ title = 'Gone'
+ explanation = ('This resource is no longer available. No forwarding '
+ 'address is given.')
+
+class HTTPLengthRequired(HTTPClientError):
+ code = 411
+ title = 'Length Required'
+ explanation = ('Content-Length header required.')
+
+class HTTPPreconditionFailed(HTTPClientError):
+ code = 412
+ title = 'Precondition Failed'
+ explanation = ('Request precondition failed.')
+
+class HTTPRequestEntityTooLarge(HTTPClientError):
+ code = 413
+ title = 'Request Entity Too Large'
+ explanation = ('The body of your request was too large for this server.')
+
+class HTTPRequestURITooLong(HTTPClientError):
+ code = 414
+ title = 'Request-URI Too Long'
+ explanation = ('The request URI was too long for this server.')
+
+class HTTPUnsupportedMediaType(HTTPClientError):
+ code = 415
+ title = 'Unsupported Media Type'
+ # override template since we need an environment variable
+ template = ('The request media type %(CONTENT_TYPE)s is not '
+ 'supported by this server.\r\n%(detail)s')
+
+class HTTPRequestRangeNotSatisfiable(HTTPClientError):
+ code = 416
+ title = 'Request Range Not Satisfiable'
+ explanation = ('The Range requested is not available.')
+
+class HTTPExpectationFailed(HTTPClientError):
+ code = 417
+ title = 'Expectation Failed'
+ explanation = ('Expectation failed.')
+
+class HTTPTooManyRequests(HTTPClientError):
+ code = 429
+ title = 'Too Many Requests'
+ explanation = ('The client has sent too many requests to the server.')
+
+#
+# 5xx Server Error
+#
+# Response status codes beginning with the digit "5" indicate cases in
+# which the server is aware that it has erred or is incapable of
+# performing the request. Except when responding to a HEAD request, the
+# server SHOULD include an entity containing an explanation of the error
+# situation, and whether it is a temporary or permanent condition. User
+# agents SHOULD display any included entity to the user. These response
+# codes are applicable to any request method.
+#
+
+class HTTPServerError(HTTPError):
+ """
+ base class for the 500's, where the server is in-error
+
+ This is an error condition in which the server is presumed to be
+ in-error. This is usually unexpected, and thus requires a traceback;
+ ideally, opening a support ticket for the customer. Unless specialized,
+ this is a '500 Internal Server Error'
+ """
+ code = 500
+ title = 'Internal Server Error'
+ explanation = (
+ 'The server has either erred or is incapable of performing\r\n'
+ 'the requested operation.\r\n')
+
+class HTTPInternalServerError(HTTPServerError):
+ pass
+
+class HTTPNotImplemented(HTTPServerError):
+ code = 501
+ title = 'Not Implemented'
+ # override template since we need an environment variable
+ template = ('The request method %(REQUEST_METHOD)s is not implemented '
+ 'for this server.\r\n%(detail)s')
+
+class HTTPBadGateway(HTTPServerError):
+ code = 502
+ title = 'Bad Gateway'
+ explanation = ('Bad gateway.')
+
+class HTTPServiceUnavailable(HTTPServerError):
+ code = 503
+ title = 'Service Unavailable'
+ explanation = ('The server is currently unavailable. '
+ 'Please try again at a later time.')
+
+class HTTPGatewayTimeout(HTTPServerError):
+ code = 504
+ title = 'Gateway Timeout'
+ explanation = ('The gateway has timed out.')
+
+class HTTPVersionNotSupported(HTTPServerError):
+ code = 505
+ title = 'HTTP Version Not Supported'
+ explanation = ('The HTTP version is not supported.')
+
+# abstract HTTP related exceptions
+__all__ = ['HTTPException', 'HTTPRedirection', 'HTTPError' ]
+
+_exceptions = {}
+for name, value in globals().items():
+ if (isinstance(value, (type, types.ClassType)) and
+ issubclass(value, HTTPException) and
+ value.code):
+ _exceptions[value.code] = value
+ __all__.append(name)
+
+def get_exception(code):
+ return _exceptions[code]
+
+############################################################
+## Middleware implementation:
+############################################################
+
+class HTTPExceptionHandler(object):
+ """
+ catches exceptions and turns them into proper HTTP responses
+
+ This middleware catches any exceptions (which are subclasses of
+ ``HTTPException``) and turns them into proper HTTP responses.
+ Note if the headers have already been sent, the stack trace is
+ always maintained as this indicates a programming error.
+
+ Note that you must raise the exception before returning the
+ app_iter, and you cannot use this with generator apps that don't
+ raise an exception until after their app_iter is iterated over.
+ """
+
+ def __init__(self, application, warning_level=None):
+ assert not warning_level or ( warning_level > 99 and
+ warning_level < 600)
+ if warning_level is not None:
+ import warnings
+ warnings.warn('The warning_level parameter is not used or supported',
+ DeprecationWarning, 2)
+ self.warning_level = warning_level or 500
+ self.application = application
+
+ def __call__(self, environ, start_response):
+ environ['paste.httpexceptions'] = self
+ environ.setdefault('paste.expected_exceptions',
+ []).append(HTTPException)
+ try:
+ return self.application(environ, start_response)
+ except HTTPException, exc:
+ return exc(environ, start_response)
+
+def middleware(*args, **kw):
+ import warnings
+ # deprecated 13 dec 2005
+ warnings.warn('httpexceptions.middleware is deprecated; use '
+ 'make_middleware or HTTPExceptionHandler instead',
+ DeprecationWarning, 2)
+ return make_middleware(*args, **kw)
+
+def make_middleware(app, global_conf=None, warning_level=None):
+ """
+ ``httpexceptions`` middleware; this catches any
+ ``paste.httpexceptions.HTTPException`` exceptions (exceptions like
+ ``HTTPNotFound``, ``HTTPMovedPermanently``, etc) and turns them
+ into proper HTTP responses.
+
+ ``warning_level`` can be an integer corresponding to an HTTP code.
+ Any code over that value will be passed 'up' the chain, potentially
+ reported on by another piece of middleware.
+ """
+ if warning_level:
+ warning_level = int(warning_level)
+ return HTTPExceptionHandler(app, warning_level=warning_level)
+
+__all__.extend(['HTTPExceptionHandler', 'get_exception'])
diff --git a/paste/httpheaders.py b/paste/httpheaders.py
new file mode 100644
index 0000000..728fa39
--- /dev/null
+++ b/paste/httpheaders.py
@@ -0,0 +1,1097 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Ian Bicking, Clark C. Evans and contributors
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# Some of this code was funded by: http://prometheusresearch.com
+"""
+HTTP Message Header Fields (see RFC 4229)
+
+This contains general support for HTTP/1.1 message headers [1]_ in a
+manner that supports WSGI ``environ`` [2]_ and ``response_headers``
+[3]_. Specifically, this module defines a ``HTTPHeader`` class whose
+instances correspond to field-name items. The actual field-content for
+the message-header is stored in the appropriate WSGI collection (either
+the ``environ`` for requests, or ``response_headers`` for responses).
+
+Each ``HTTPHeader`` instance is a callable (defining ``__call__``)
+that takes one of the following:
+
+ - an ``environ`` dictionary, returning the corresponding header
+   value according to the WSGI's ``HTTP_`` prefix mechanism, e.g.,
+ ``USER_AGENT(environ)`` returns ``environ.get('HTTP_USER_AGENT')``
+
+ - a ``response_headers`` list, giving a comma-delimited string for
+ each corresponding ``header_value`` tuple entries (see below).
+
+ - a sequence of string ``*args`` that are comma-delimited into
+ a single string value: ``CONTENT_TYPE("text/html","text/plain")``
+ returns ``"text/html, text/plain"``
+
+ - a set of ``**kwargs`` keyword arguments that are used to create
+ a header value, in a manner dependent upon the particular header in
+ question (to make value construction easier and error-free):
+   ``CACHE_CONTROL(max_age=CACHE_CONTROL.ONE_WEEK)``
+   returns ``"public, max-age=604800"``
+
+Each ``HTTPHeader`` instance also provides several methods to act on
+a WSGI collection, for removing and setting header values.
+
+ ``delete(collection)``
+
+ This method removes all entries of the corresponding header from
+ the given collection (``environ`` or ``response_headers``), e.g.,
+ ``USER_AGENT.delete(environ)`` deletes the 'HTTP_USER_AGENT' entry
+ from the ``environ``.
+
+ ``update(collection, *args, **kwargs)``
+
+ This method does an in-place replacement of the given header entry,
+ for example: ``CONTENT_LENGTH(response_headers,len(body))``
+
+ The first argument is a valid ``environ`` dictionary or
+ ``response_headers`` list; remaining arguments are passed on to
+ ``__call__(*args, **kwargs)`` for value construction.
+
+ ``apply(collection, **kwargs)``
+
+ This method is similar to update, only that it may affect other
+ headers. For example, according to recommendations in RFC 2616,
+ certain Cache-Control configurations should also set the
+ ``Expires`` header for HTTP/1.0 clients. By default, ``apply()``
+ is simply ``update()`` but limited to keyword arguments.
+
+This particular approach to managing headers within a WSGI collection
+has several advantages:
+
+ 1. Typos in the header name are easily detected since they become a
+ ``NameError`` when executed. The approach of using header strings
+ directly can be problematic; for example, the following should
+ return ``None`` : ``environ.get("HTTP_ACCEPT_LANGUAGES")``
+
+ 2. For specific headers with validation, using ``__call__`` will
+ result in an automatic header value check. For example, the
+ _ContentDisposition header will reject a value having ``maxage``
+ or ``max_age`` (the appropriate parameter is ``max-age`` ).
+
+ 3. When appending/replacing headers, the field-name has the suggested
+ RFC capitalization (e.g. ``Content-Type`` or ``ETag``) for
+ user-agents that incorrectly use case-sensitive matches.
+
+  4. Some headers (such as ``Content-Type``) are singular, that is,
+ only one entry of this type may occur in a given set of
+ ``response_headers``. This module knows about those cases and
+ enforces this cardinality constraint.
+
+ 5. The exact details of WSGI header management are abstracted so
+ the programmer need not worry about operational differences
+ between ``environ`` dictionary or ``response_headers`` list.
+
+ 6. Sorting of ``HTTPHeaders`` is done following the RFC suggestion
+ that general-headers come first, followed by request and response
+ headers, and finishing with entity-headers.
+
+ 7. Special care is given to exceptional cases such as Set-Cookie
+ which violates the RFC's recommendation about combining header
+ content into a single entry using comma separation.
+
+A particular difficulty with HTTP message headers is a categorization
+of sorts as described in section 4.2:
+
+ Multiple message-header fields with the same field-name MAY be
+ present in a message if and only if the entire field-value for
+ that header field is defined as a comma-separated list [i.e.,
+ #(values)]. It MUST be possible to combine the multiple header
+ fields into one "field-name: field-value" pair, without changing
+ the semantics of the message, by appending each subsequent
+ field-value to the first, each separated by a comma.
+
+This creates three fundamentally different kinds of headers:
+
+ - Those that do not have a #(values) production, and hence are
+ singular and may only occur once in a set of response fields;
+ this case is handled by the ``_SingleValueHeader`` subclass.
+
+ - Those which have the #(values) production and follow the
+ combining rule outlined above; our ``_MultiValueHeader`` case.
+
+ - Those which are multi-valued, but cannot be combined (such as the
+ ``Set-Cookie`` header due to its ``Expires`` parameter); or where
+ combining them into a single header entry would cause common
+ user-agents to fail (``WWW-Authenticate``, ``Warning``) since
+ they fail to handle dates even when properly quoted. This case
+ is handled by ``_MultiEntryHeader``.
+
+Since this project does not have time to provide rigorous support
+and validation for all headers, it does a basic construction of
+headers listed in RFC 2616 (plus a few others) so that they can
+be obtained by simply doing ``from paste.httpheaders import *``;
+the name of the header instance is the "common name" less any
+dashes to give CamelCase style names.
+
+.. [1] http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
+.. [2] http://www.python.org/peps/pep-0333.html#environ-variables
+.. [3] http://www.python.org/peps/pep-0333.html#the-start-response-callable
+
+"""
+import mimetypes
+import urllib2
+import re
+from rfc822 import formatdate, parsedate_tz, mktime_tz
+from time import time as now
+from httpexceptions import HTTPBadRequest
+
+__all__ = ['get_header', 'list_headers', 'normalize_headers',
+ 'HTTPHeader', 'EnvironVariable' ]
+
+class EnvironVariable(str):
+ """
+ a CGI ``environ`` variable as described by WSGI
+
+ This is a helper object so that standard WSGI ``environ`` variables
+ can be extracted w/o syntax error possibility.
+ """
+ def __call__(self, environ):
+ return environ.get(self,'')
+ def __repr__(self):
+ return '<EnvironVariable %s>' % self
+ def update(self, environ, value):
+ environ[self] = value
+REMOTE_USER = EnvironVariable("REMOTE_USER")
+REMOTE_SESSION = EnvironVariable("REMOTE_SESSION")
+AUTH_TYPE = EnvironVariable("AUTH_TYPE")
+REQUEST_METHOD = EnvironVariable("REQUEST_METHOD")
+SCRIPT_NAME = EnvironVariable("SCRIPT_NAME")
+PATH_INFO = EnvironVariable("PATH_INFO")
+
+for _name, _obj in globals().items():
+ if isinstance(_obj, EnvironVariable):
+ __all__.append(_name)
+
+_headers = {}
+
+class HTTPHeader(object):
+ """
+ an HTTP header
+
+ HTTPHeader instances represent a particular ``field-name`` of an
+ HTTP message header. They do not hold a field-value, but instead
+    provide operations that work on its corresponding values. Storage
+ of the actual field values is done with WSGI ``environ`` or
+    ``response_headers`` as appropriate. Typically, sub-classes that
+    represent a specific HTTP header, such as _ContentDisposition, are
+    singletons. Once constructed the HTTPHeader instances themselves
+ are immutable and stateless.
+
+ For purposes of documentation a "container" refers to either a
+ WSGI ``environ`` dictionary, or a ``response_headers`` list.
+
+ Member variables (and correspondingly constructor arguments).
+
+ ``name``
+
+ the ``field-name`` of the header, in "common form"
+ as presented in RFC 2616; e.g. 'Content-Type'
+
+ ``category``
+
+ one of 'general', 'request', 'response', or 'entity'
+
+ ``version``
+
+ version of HTTP (informational) with which the header should
+ be recognized
+
+ ``sort_order``
+
+ sorting order to be applied before sorting on
+ field-name when ordering headers in a response
+
+ Special Methods:
+
+ ``__call__``
+
+ The primary method of the HTTPHeader instance is to make
+ it a callable, it takes either a collection, a string value,
+ or keyword arguments and attempts to find/construct a valid
+ field-value
+
+ ``__lt__``
+
+ This method is used so that HTTPHeader objects can be
+ sorted in a manner suggested by RFC 2616.
+
+ ``__str__``
+
+ The string-value for instances of this class is
+ the ``field-name``.
+
+ Primary Methods:
+
+ ``delete()``
+
+        remove all occurrences (if any) of the given
+ header in the collection provided
+
+ ``update()``
+
+ replaces (if they exist) all field-value items
+ in the given collection with the value provided
+
+ ``tuples()``
+
+ returns a set of (field-name, field-value) tuples
+        suitable for extending ``response_headers``
+
+ Custom Methods (these may not be implemented):
+
+ ``apply()``
+
+ similar to ``update``, but with two differences; first,
+ only keyword arguments can be used, and second, specific
+ sub-classes may introduce side-effects
+
+ ``parse()``
+
+ converts a string value of the header into a more usable
+ form, such as time in seconds for a date header, etc.
+
+ The collected versions of initialized header instances are immediately
+ registered and accessible through the ``get_header`` function. Do not
+ inherit from this directly, use one of ``_SingleValueHeader``,
+ ``_MultiValueHeader``, or ``_MultiEntryHeader`` as appropriate.
+ """
+
+ #
+ # Things which can be customized
+ #
+ version = '1.1'
+ category = 'general'
+ reference = ''
+ extensions = {}
+
+ def compose(self, **kwargs):
+ """
+ build header value from keyword arguments
+
+ This method is used to build the corresponding header value when
+ keyword arguments (or no arguments) were provided. The result
+ should be a sequence of values. For example, the ``Expires``
+ header takes a keyword argument ``time`` (e.g. time.time()) from
+        which it returns the corresponding date.
+ """
+ raise NotImplementedError()
+
+ def parse(self, *args, **kwargs):
+ """
+ convert raw header value into more usable form
+
+ This method invokes ``values()`` with the arguments provided,
+ parses the header results, and then returns a header-specific
+ data structure corresponding to the header. For example, the
+ ``Expires`` header returns seconds (as returned by time.time())
+ """
+ raise NotImplementedError()
+
+ def apply(self, collection, **kwargs):
+ """
+ update the collection /w header value (may have side effects)
+
+ This method is similar to ``update`` only that usage may result
+ in other headers being changed as recommended by the corresponding
+ specification. The return value is defined by the particular
+ sub-class. For example, the ``_CacheControl.apply()`` sets the
+ ``Expires`` header in addition to its normal behavior.
+ """
+ self.update(collection, **kwargs)
+
+ #
+ # Things which are standardized (mostly)
+ #
+ def __new__(cls, name, category=None, reference=None, version=None):
+ """
+ construct a new ``HTTPHeader`` instance
+
+ We use the ``__new__`` operator to ensure that only one
+ ``HTTPHeader`` instance exists for each field-name, and to
+ register the header so that it can be found/enumerated.
+ """
+ self = get_header(name, raiseError=False)
+ if self:
+ # Allow the registration to happen again, but assert
+ # that everything is identical.
+ assert self.name == name, \
+ "duplicate registration with different capitalization"
+ assert self.category == category, \
+ "duplicate registration with different category"
+ assert cls == self.__class__, \
+ "duplicate registration with different class"
+ return self
+
+ self = object.__new__(cls)
+ self.name = name
+ assert isinstance(self.name, str)
+ self.category = category or self.category
+ self.version = version or self.version
+ self.reference = reference or self.reference
+ _headers[self.name.lower()] = self
+ self.sort_order = {'general': 1, 'request': 2,
+ 'response': 3, 'entity': 4 }[self.category]
+ self._environ_name = getattr(self, '_environ_name',
+ 'HTTP_'+ self.name.upper().replace("-","_"))
+ self._headers_name = getattr(self, '_headers_name',
+ self.name.lower())
+ assert self.version in ('1.1', '1.0', '0.9')
+ return self
+
+ def __str__(self):
+ return self.name
+
+ def __lt__(self, other):
+ """
+ sort header instances as specified by RFC 2616
+
+ Re-define sorting so that general headers are first, followed
+ by request/response headers, and then entity headers. The
+ list.sort() methods use the less-than operator for this purpose.
+ """
+ if isinstance(other, HTTPHeader):
+ if self.sort_order != other.sort_order:
+ return self.sort_order < other.sort_order
+ return self.name < other.name
+ return False
+
+ def __repr__(self):
+ ref = self.reference and (' (%s)' % self.reference) or ''
+ return '<%s %s%s>' % (self.__class__.__name__, self.name, ref)
+
+ def values(self, *args, **kwargs):
+ """
+ find/construct field-value(s) for the given header
+
+ Resolution is done according to the following arguments:
+
+ - If only keyword arguments are given, then this is equivalent
+ to ``compose(**kwargs)``.
+
+ - If the first (and only) argument is a dict, it is assumed
+ to be a WSGI ``environ`` and the result of the corresponding
+ ``HTTP_`` entry is returned.
+
+ - If the first (and only) argument is a list, it is assumed
+ to be a WSGI ``response_headers`` and the field-value(s)
+ for this header are collected and returned.
+
+ - In all other cases, the arguments are collected, checked that
+ they are string values, possibly verified by the header's
+ logic, and returned.
+
+ At this time it is an error to provide keyword arguments if args
+ is present (this might change). It is an error to provide both
+ a WSGI object and also string arguments. If no arguments are
+ provided, then ``compose()`` is called to provide a default
+        value for the header; if there is no default it is an error.
+ """
+ if not args:
+ return self.compose(**kwargs)
+ if list == type(args[0]):
+ assert 1 == len(args)
+ result = []
+ name = self.name.lower()
+ for value in [value for header, value in args[0]
+ if header.lower() == name]:
+ result.append(value)
+ return result
+ if dict == type(args[0]):
+ assert 1 == len(args) and 'wsgi.version' in args[0]
+ value = args[0].get(self._environ_name)
+ if not value:
+ return ()
+ return (value,)
+ for item in args:
+ assert not type(item) in (dict, list)
+ return args
+
+ def __call__(self, *args, **kwargs):
+ """
+ converts ``values()`` into a string value
+
+ This method converts the results of ``values()`` into a string
+ value for common usage. By default, it is asserted that only
+ one value exists; if you need to access all values then either
+ call ``values()`` directly, or inherit ``_MultiValueHeader``
+ which overrides this method to return a comma separated list of
+ values as described by section 4.2 of RFC 2616.
+ """
+ values = self.values(*args, **kwargs)
+ assert isinstance(values, (tuple, list))
+ if not values:
+ return ''
+ assert len(values) == 1, "more than one value: %s" % repr(values)
+ return str(values[0]).strip()
+
+ def delete(self, collection):
+ """
+        removes all occurrences of the header from the collection provided
+ """
+ if type(collection) == dict:
+ if self._environ_name in collection:
+ del collection[self._environ_name]
+ return self
+ assert list == type(collection)
+ i = 0
+ while i < len(collection):
+ if collection[i][0].lower() == self._headers_name:
+ del collection[i]
+ continue
+ i += 1
+
+ def update(self, collection, *args, **kwargs):
+ """
+ updates the collection with the provided header value
+
+ This method replaces (in-place when possible) all occurrences of
+ the given header with the provided value. If no value is
+ provided, this is the same as ``remove`` (note that this case
+ can only occur if the target is a collection w/o a corresponding
+ header value). The return value is the new header value (which
+ could be a list for ``_MultiEntryHeader`` instances).
+ """
+ value = self.__call__(*args, **kwargs)
+ if not value:
+ self.delete(collection)
+ return
+ if type(collection) == dict:
+ collection[self._environ_name] = value
+ return
+ assert list == type(collection)
+ i = 0
+ found = False
+ while i < len(collection):
+ if collection[i][0].lower() == self._headers_name:
+ if found:
+ del collection[i]
+ continue
+ collection[i] = (self.name, value)
+ found = True
+ i += 1
+ if not found:
+ collection.append((self.name, value))
+
+ def tuples(self, *args, **kwargs):
+ value = self.__call__(*args, **kwargs)
+ if not value:
+ return ()
+ return [(self.name, value)]
+
+class _SingleValueHeader(HTTPHeader):
+ """
+ a ``HTTPHeader`` with exactly a single value
+
+ This is the default behavior of ``HTTPHeader`` where returning a
+ the string-value of headers via ``__call__`` assumes that only
+ a single value exists.
+ """
+ pass
+
+class _MultiValueHeader(HTTPHeader):
+ """
+ a ``HTTPHeader`` with one or more values
+
+    The field-value for these header instances is allowed to be more
+ than one value; whereby the ``__call__`` method returns a comma
+ separated list as described by section 4.2 of RFC 2616.
+ """
+
+ def __call__(self, *args, **kwargs):
+ results = self.values(*args, **kwargs)
+ if not results:
+ return ''
+ return ", ".join([str(v).strip() for v in results])
+
+ def parse(self, *args, **kwargs):
+ value = self.__call__(*args, **kwargs)
+ values = value.split(',')
+ return [
+ v.strip() for v in values
+ if v.strip()]
+
+class _MultiEntryHeader(HTTPHeader):
+ """
+ a multi-value ``HTTPHeader`` where items cannot be combined with a comma
+
+ This header is multi-valued, but the values should not be combined
+ with a comma since the header is not in compliance with RFC 2616
+ (Set-Cookie due to Expires parameter) or which common user-agents do
+ not behave well when the header values are combined.
+ """
+
+ def update(self, collection, *args, **kwargs):
+ assert list == type(collection), "``environ`` may not be updated"
+ self.delete(collection)
+ collection.extend(self.tuples(*args, **kwargs))
+
+ def tuples(self, *args, **kwargs):
+ values = self.values(*args, **kwargs)
+ if not values:
+ return ()
+ return [(self.name, value.strip()) for value in values]
+
+def get_header(name, raiseError=True):
+ """
+ find the given ``HTTPHeader`` instance
+
+ This function finds the corresponding ``HTTPHeader`` for the
+ ``name`` provided. So that python-style names can be used,
+ underscores are converted to dashes before the lookup.
+ """
+ retval = _headers.get(str(name).strip().lower().replace("_","-"))
+ if not retval and raiseError:
+ raise AssertionError("'%s' is an unknown header" % name)
+ return retval
+
def list_headers(general=None, request=None, response=None, entity=None):
    """
    list all headers for a given category

    Returns every registered ``HTTPHeader`` whose category matches one
    of the truthy arguments; with no arguments, headers from all four
    categories are returned.
    """
    if not (general or request or response or entity):
        general = request = response = entity = True
    # Collect the wanted category names.  (The previous loop variable
    # was named ``bool``, shadowing the builtin.)
    search = [name for (wanted, name) in
              ((general, 'general'), (request, 'request'),
               (response, 'response'), (entity, 'entity')) if wanted]
    return [head for head in _headers.values() if head.category in search]
+
def normalize_headers(response_headers, strict=True):
    """
    sort headers as suggested by RFC 2616

    The ``response_headers`` list is modified in place: each header
    name is replaced by its canonical spelling, and the list is ordered
    general headers first, then request/response headers, then entity
    headers, with unrecognized headers last; names sort alphabetically
    within each group.
    """
    rank = {}
    for pos, (key, val) in enumerate(response_headers):
        head = get_header(key, strict)
        if head:
            canonical = str(head)
            rank[canonical] = head.sort_order
        else:
            # Unknown header: canonicalize capitalization ourselves and
            # sort it after every recognized category.
            canonical = '-'.join([part.capitalize() for part in
                                  key.replace("_", "-").split("-")])
            rank[canonical] = 4
        response_headers[pos] = (canonical, val)
    # (category, name) key ordering is equivalent to the previous
    # cmp-based comparator.
    response_headers.sort(key=lambda item: (rank[item[0]], item[0]))
+
class _DateHeader(_SingleValueHeader):
    """
    handle date-based headers

    This extends the ``_SingleValueHeader`` object with specific
    treatment of time values:

    - It overrides ``compose`` to accept a ``time`` keyword (seconds
      since the epoch, defaulting to the current time) and an optional
      integer ``delta`` offset in seconds.

    - ``parse`` returns the header's value as seconds since the epoch.
    """

    def compose(self, time=None, delta=None):
        """Format ``time`` (+ ``delta`` seconds) as an RFC 1123 date."""
        # Compare against None explicitly: the previous ``time or
        # now()`` treated the valid epoch timestamp 0 as "not given".
        if time is None:
            time = now()
        if delta:
            assert type(delta) == int
            time += delta
        return (formatdate(time),)

    def parse(self, *args, **kwargs):
        """ return the time value (in seconds since 1970) """
        value = self.__call__(*args, **kwargs)
        if value:
            try:
                return mktime_tz(parsedate_tz(value))
            except (TypeError, OverflowError):
                # unparseable or out-of-range date in the request
                raise HTTPBadRequest((
                    "Received an ill-formed timestamp for %s: %s\r\n") %
                    (self.name, value))
+
+#
+# Following are specific HTTP headers. Since these classes are mostly
+# singletons, there is no point in keeping the class around once it has
+# been instantiated, so we use the same name.
+#
+
class _CacheControl(_MultiValueHeader):
    """
    Cache-Control, RFC 2616 14.9 (use ``CACHE_CONTROL``)

    This header can be constructed (using keyword arguments), by
    first specifying one of the following mechanisms:

    ``public``

        if True, this argument specifies that the
        response, as a whole, may be cached.

    ``private``

        if True, this argument specifies that the response, as a
        whole, may be cached; this implementation does not support
        the enumeration of private fields

    ``no_cache``

        if True, this argument specifies that the response, as a
        whole, may not be cached; this implementation does not
        support the enumeration of private fields

    In general, only one of the above three may be True, the other 2
    must then be False or None. If all three are None, then the cache
    is assumed to be ``public``. Following one of these mechanism
    specifiers are various modifiers:

    ``no_store``

        indicates if content may be stored on disk;
        otherwise cache is limited to memory (note:
        users can still save the data, this applies
        to intermediate caches)

    ``max_age``

        the maximum duration (in seconds) for which
        the content should be cached; if ``no-cache``
        is specified, this defaults to 0 seconds

    ``s_maxage``

        the maximum duration (in seconds) for which the
        content should be allowed in a shared cache.

    ``no_transform``

        specifies that an intermediate cache should
        not convert the content from one type to
        another (e.g. transform a BMP to a PNG).

    ``extensions``

        gives additional cache-control extensions,
        such as items like, community="UCI" (14.9.6)

    The usage of ``apply()`` on this header has side-effects. As
    recommended by RFC 2616, if ``max_age`` is provided, then the
    ``Expires`` header is also calculated for HTTP/1.0 clients and
    proxies (this is done at the time ``apply()`` is called). For
    ``no-cache`` and for ``private`` cases, we either do not want the
    response cached or do not want any response accidentally returned
    to other users; so to prevent this case, we set the ``Expires``
    header to the time of the request, signifying to HTTP/1.0
    transports that the content isn't to be cached. If you are using
    SSL, your communication is already "private", so to work with
    HTTP/1.0 browsers over SSL, consider specifying your cache as
    ``public`` as the distinction between public and private is moot.
    """

    # common values for max-age; "good enough" approximates
    ONE_HOUR = 60*60
    ONE_DAY = ONE_HOUR * 24
    ONE_WEEK = ONE_DAY * 7
    ONE_MONTH = ONE_DAY * 30
    ONE_YEAR = ONE_WEEK * 52

    def _compose(self, public=None, private=None, no_cache=None,
                 no_store=False, max_age=None, s_maxage=None,
                 no_transform=False, **extensions):
        """Build the directive list; returns (directives, expires).

        ``expires`` is the offset in seconds to mirror into the
        ``Expires`` header for HTTP/1.0 agents (0 for the private and
        no-cache mechanisms), or None when no Expires should be set.
        """
        assert isinstance(max_age, (type(None), int))
        assert isinstance(s_maxage, (type(None), int))
        expires = 0
        result = []
        if private is True:
            assert not public and not no_cache and not s_maxage
            result.append('private')
        elif no_cache is True:
            assert not public and not private and not max_age
            result.append('no-cache')
        else:
            # ``public`` is the default mechanism when none is given
            assert public is None or public is True
            assert not private and not no_cache
            expires = max_age
            result.append('public')
        if no_store:
            result.append('no-store')
        if no_transform:
            result.append('no-transform')
        if max_age is not None:
            result.append('max-age=%d' % max_age)
        if s_maxage is not None:
            result.append('s-maxage=%d' % s_maxage)
        for (k, v) in extensions.items():
            # NOTE(review): assumes the header instance declares an
            # ``extensions`` whitelist attribute (not visible in this
            # chunk) -- TODO confirm against the HTTPHeader base class.
            if k not in self.extensions:
                raise AssertionError("unexpected extension used: '%s'" % k)
            result.append('%s="%s"' % (k.replace("_", "-"), v))
        return (result, expires)

    def compose(self, **kwargs):
        # only the directive list; the Expires side-effect is dropped
        (result, expires) = self._compose(**kwargs)
        return result

    def apply(self, collection, **kwargs):
        """ returns the offset expiration in seconds """
        (result, expires) = self._compose(**kwargs)
        if expires is not None:
            # mirror the expiration for HTTP/1.0 caches (see docstring)
            EXPIRES.update(collection, delta=expires)
        self.update(collection, *result)
        return expires

_CacheControl('Cache-Control', 'general', 'RFC 2616, 14.9')
+
class _ContentType(_SingleValueHeader):
    """
    Content-Type, RFC 2616 section 14.17

    Unlike other headers, use the CGI variable instead.
    """
    version = '1.0'
    _environ_name = 'CONTENT_TYPE'

    # common mimetype constants
    UNKNOWN = 'application/octet-stream'
    TEXT_PLAIN = 'text/plain'
    TEXT_HTML = 'text/html'
    TEXT_XML = 'text/xml'

    def compose(self, major=None, minor=None, charset=None):
        """Build a ``major/minor`` mimetype, optionally with a charset.

        A missing ``major`` is inferred as ``text`` for the well-known
        subtypes 'plain', 'html' and 'xml'; otherwise the generic
        octet-stream type is returned.  A missing ``minor`` becomes the
        '*' wildcard.
        """
        if not major:
            if minor not in ('plain', 'html', 'xml'):
                assert not minor and not charset
                return (self.UNKNOWN,)
            major = 'text'
        mimetype = "%s/%s" % (major, minor or "*")
        if charset:
            mimetype += "; charset=%s" % charset
        return (mimetype,)

_ContentType('Content-Type', 'entity', 'RFC 2616, 14.17')
+
class _ContentLength(_SingleValueHeader):
    """
    Content-Length, RFC 2616 section 14.13

    Unlike other headers, use the CGI variable instead.
    """
    version = "1.0"
    # read from the CGI variable, not HTTP_CONTENT_LENGTH
    _environ_name = 'CONTENT_LENGTH'

_ContentLength('Content-Length', 'entity', 'RFC 2616, 14.13')
+
class _ContentDisposition(_SingleValueHeader):
    """
    Content-Disposition, RFC 2183 (use ``CONTENT_DISPOSITION``)

    This header can be constructed (using keyword arguments),
    by first specifying one of the following mechanisms:

    ``attachment``

        if True, this specifies that the content should not be
        shown in the browser and should be handled externally,
        even if the browser could render the content

    ``inline``

        exclusive with attachment; indicates that the content
        should be rendered in the browser if possible, but
        otherwise it should be handled externally

    Only one of the above 2 may be True. If both are None, then
    the disposition is assumed to be an ``attachment``. These are
    distinct fields since support for field enumeration may be
    added in the future.

    ``filename``

        the filename parameter, if any, to be reported; if
        this is None, then the current object's filename
        attribute is used

    The usage of ``apply()`` on this header has side-effects. If
    filename is provided, and Content-Type is not set or is
    'application/octet-stream', then the mimetypes.guess is used to
    upgrade the Content-Type setting.
    """

    def _compose(self, attachment=None, inline=None, filename=None):
        """Build the field value; returns ((value,), sanitized filename)."""
        result = []
        if inline is True:
            assert not attachment
            result.append('inline')
        else:
            # attachment is the default disposition
            assert not inline
            result.append('attachment')
        if filename:
            assert '"' not in filename
            # strip any path components (either separator style),
            # keeping only the base name
            filename = filename.split("/")[-1]
            filename = filename.split("\\")[-1]
            result.append('filename="%s"' % filename)
        return (("; ".join(result),), filename)

    def compose(self, **kwargs):
        # second element of the pair is the filename; unused here
        (result, mimetype) = self._compose(**kwargs)
        return result

    def apply(self, collection, **kwargs):
        """ return the new Content-Type side-effect value """
        (result, filename) = self._compose(**kwargs)
        mimetype = CONTENT_TYPE(collection)
        if filename and (not mimetype or CONTENT_TYPE.UNKNOWN == mimetype):
            # upgrade a missing/generic Content-Type from the filename
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype and CONTENT_TYPE.UNKNOWN != mimetype:
                CONTENT_TYPE.update(collection, mimetype)
        self.update(collection, *result)
        return mimetype

_ContentDisposition('Content-Disposition', 'entity', 'RFC 2183')
+
class _IfModifiedSince(_DateHeader):
    """
    If-Modified-Since, RFC 2616 section 14.25
    """
    version = '1.0'

    def __call__(self, *args, **kwargs):
        """
        Split the value on ';' in case the header includes extra
        attributes. E.g.  IE 6 is known to send:
        If-Modified-Since: Sun, 25 Jun 2006 20:36:35 GMT; length=1506
        """
        return _DateHeader.__call__(self, *args, **kwargs).split(';', 1)[0]

    def parse(self, *args, **kwargs):
        """Parse to epoch seconds, rejecting timestamps from the future."""
        value = _DateHeader.parse(self, *args, **kwargs)
        if value and value > now():
            raise HTTPBadRequest((
                "Please check your system clock.\r\n"
                "According to this server, the time provided in the\r\n"
                "%s header is in the future.\r\n") % self.name)
        return value
_IfModifiedSince('If-Modified-Since', 'request', 'RFC 2616, 14.25')
+
class _Range(_MultiValueHeader):
    """
    Range, RFC 2616 14.35 (use ``RANGE``)

    According to section 14.16, the response to this message should be a
    206 Partial Content and that if multiple non-overlapping byte ranges
    are requested (it is an error to request multiple overlapping
    ranges) the result should be sent as multipart/byteranges mimetype.

    The server should respond with '416 Requested Range Not Satisfiable'
    if the requested ranges are out-of-bounds. The specification also
    indicates that a syntax error in the Range request should result in
    the header being ignored rather than a '400 Bad Request'.
    """

    def parse(self, *args, **kwargs):
        """
        Returns a tuple (units, list), where list is a sequence of
        (begin, end) tuples; and end is None if it was not provided.
        Returns None when the header is absent or malformed.
        """
        value = self.__call__(*args, **kwargs)
        if not value:
            return None
        ranges = []
        last_end = -1
        try:
            # renamed from ``range`` to avoid shadowing the builtin
            (units, byte_ranges) = value.split("=", 1)
            units = units.strip().lower()
            for item in byte_ranges.split(","):
                (begin, end) = item.split("-")
                begin = int(begin) if begin.strip() else 0
                if begin <= last_end:
                    # overlapping or out-of-order ranges are an error
                    raise ValueError()
                if not end.strip():
                    # Open-ended range: nothing may legally follow it,
                    # since any further range would overlap.  (The old
                    # code set last_end = None here, which under Python
                    # 2 compares smaller than any int and let such
                    # overlaps through undetected.)
                    end = None
                    last_end = float('inf')
                else:
                    end = int(end)
                    last_end = end
                ranges.append((begin, end))
        except ValueError:
            # In this case where the Range header is malformed,
            # section 14.16 says to treat the request as if the
            # Range header was not present. How do I log this?
            return None
        return (units, ranges)
_Range('Range', 'request', 'RFC 2616, 14.35')
+
class _AcceptLanguage(_MultiValueHeader):
    """
    Accept-Language, RFC 2616 section 14.4
    """

    def parse(self, *args, **kwargs):
        """
        Return a list of language tags sorted by their "q" values. For example,
        "en-us,en;q=0.5" should return ``["en-us", "en"]``. If there is no
        ``Accept-Language`` header present, default to ``[]``.
        """
        header = self.__call__(*args, **kwargs)
        if header is None:
            return []
        langs = [v for v in header.split(",") if v]
        qs = []
        for lang in langs:
            pieces = lang.split(";")
            lang, params = pieces[0].strip().lower(), pieces[1:]
            q = 1
            for param in params:
                if '=' not in param:
                    # Malformed request; probably a bot, we'll ignore
                    continue
                # split at most once so "a=b=c" cannot blow up unpacking
                lvalue, rvalue = param.split("=", 1)
                lvalue = lvalue.strip().lower()
                rvalue = rvalue.strip()
                if lvalue == "q":
                    try:
                        q = float(rvalue)
                    except ValueError:
                        # Malformed qvalue (e.g. "q=foo"); ignore it
                        # rather than letting ValueError escape as a
                        # 500 for an untrusted request header.
                        continue
            qs.append((lang, q))
        # Stable sort, highest q first; equivalent to the old cmp-based
        # sort but also valid on Python 3 where cmp is gone.
        qs.sort(key=lambda pair: -pair[1])
        return [lang for (lang, q) in qs]
_AcceptLanguage('Accept-Language', 'request', 'RFC 2616, 14.4')
+
class _AcceptRanges(_MultiValueHeader):
    """
    Accept-Ranges, RFC 2616 section 14.5
    """
    def compose(self, none=None, bytes=None):
        # advertise byte-range support, or explicitly decline ranges
        return ('bytes',) if bytes else ('none',)
_AcceptRanges('Accept-Ranges', 'response', 'RFC 2616, 14.5')
+
class _ContentRange(_SingleValueHeader):
    """
    Content-Range, RFC 2616 section 14.6
    """
    def compose(self, first_byte=None, last_byte=None, total_length=None):
        """Format a ``bytes first-last/total`` range specifier.

        ``last_byte == -1`` is accepted to mean an unknown end.
        Validation runs before formatting, so inconsistent arguments
        now fail with a clear AssertionError instead of a formatting
        TypeError (previously the string was built first).
        """
        assert last_byte == -1 or first_byte <= last_byte
        assert last_byte < total_length
        return ("bytes %d-%d/%d" % (first_byte, last_byte, total_length),)
_ContentRange('Content-Range', 'entity', 'RFC 2616, 14.6')
+
class _Authorization(_SingleValueHeader):
    """
    Authorization, RFC 2617 (RFC 2616, 14.8)
    """
    def compose(self, digest=None, basic=None, username=None, password=None,
                challenge=None, path=None, method=None):
        """Build a Basic or Digest ``Authorization`` field value.

        Basic auth is produced when ``basic`` is set or no ``challenge``
        is given; otherwise ``challenge`` (a WWW-Authenticate value) is
        parsed and answered with a Digest response.
        """
        assert username and password
        if basic or not challenge:
            assert not digest
            userpass = "%s:%s" % (username.strip(), password.strip())
            # Return a one-tuple like every other ``compose`` (and like
            # the digest branch below); this branch previously returned
            # a bare string, breaking the compose() contract.
            return ("Basic %s" % userpass.encode('base64').strip(),)
        assert challenge and not basic
        path = path or "/"
        (_, realm) = challenge.split('realm="')
        (realm, _) = realm.split('"', 1)
        auth = urllib2.AbstractDigestAuthHandler()
        auth.add_password(realm, path, username, password)
        (token, challenge) = challenge.split(' ', 1)
        chal = urllib2.parse_keqv_list(urllib2.parse_http_list(challenge))
        class FakeRequest(object):
            # minimal stand-in for urllib2.Request, exposing just the
            # accessors AbstractDigestAuthHandler consults
            def get_full_url(self):
                return path
            def has_data(self):
                return False
            def get_method(self):
                return method or "GET"
            get_selector = get_full_url
        retval = "Digest %s" % auth.get_authorization(FakeRequest(), chal)
        return (retval,)
_Authorization('Authorization', 'request', 'RFC 2617')
+
+#
+# For now, construct a minimalistic version of the field-names; at a
+# later date more complicated headers may sprout content constructors.
+# The items commented out have concrete variants.
+#
# Instantiating each header class registers the header in ``_headers``;
# the instance itself is deliberately not bound to a name here.
for (name, category, version, style, comment) in \
(("Accept"             ,'request' ,'1.1','multi-value','RFC 2616, 14.1' )
,("Accept-Charset"     ,'request' ,'1.1','multi-value','RFC 2616, 14.2' )
,("Accept-Encoding"    ,'request' ,'1.1','multi-value','RFC 2616, 14.3' )
#,("Accept-Language"   ,'request' ,'1.1','multi-value','RFC 2616, 14.4' )
#,("Accept-Ranges"     ,'response','1.1','multi-value','RFC 2616, 14.5' )
,("Age"                ,'response','1.1','singular'   ,'RFC 2616, 14.6' )
,("Allow"              ,'entity'  ,'1.0','multi-value','RFC 2616, 14.7' )
#,("Authorization"     ,'request' ,'1.0','singular'   ,'RFC 2616, 14.8' )
#,("Cache-Control"     ,'general' ,'1.1','multi-value','RFC 2616, 14.9' )
,("Cookie"             ,'request' ,'1.0','multi-value','RFC 2109/Netscape')
,("Connection"         ,'general' ,'1.1','multi-value','RFC 2616, 14.10')
,("Content-Encoding"   ,'entity'  ,'1.0','multi-value','RFC 2616, 14.11')
#,("Content-Disposition",'entity'  ,'1.1','multi-value','RFC 2616, 15.5' )
,("Content-Language"   ,'entity'  ,'1.1','multi-value','RFC 2616, 14.12')
#,("Content-Length"    ,'entity'  ,'1.0','singular'   ,'RFC 2616, 14.13')
,("Content-Location"   ,'entity'  ,'1.1','singular'   ,'RFC 2616, 14.14')
,("Content-MD5"        ,'entity'  ,'1.1','singular'   ,'RFC 2616, 14.15')
#,("Content-Range"     ,'entity'  ,'1.1','singular'   ,'RFC 2616, 14.16')
#,("Content-Type"      ,'entity'  ,'1.0','singular'   ,'RFC 2616, 14.17')
,("Date"               ,'general' ,'1.0','date-header','RFC 2616, 14.18')
,("ETag"               ,'response','1.1','singular'   ,'RFC 2616, 14.19')
,("Expect"             ,'request' ,'1.1','multi-value','RFC 2616, 14.20')
,("Expires"            ,'entity'  ,'1.0','date-header','RFC 2616, 14.21')
,("From"               ,'request' ,'1.0','singular'   ,'RFC 2616, 14.22')
,("Host"               ,'request' ,'1.1','singular'   ,'RFC 2616, 14.23')
,("If-Match"           ,'request' ,'1.1','multi-value','RFC 2616, 14.24')
#,("If-Modified-Since" ,'request' ,'1.0','date-header','RFC 2616, 14.25')
,("If-None-Match"      ,'request' ,'1.1','multi-value','RFC 2616, 14.26')
,("If-Range"           ,'request' ,'1.1','singular'   ,'RFC 2616, 14.27')
,("If-Unmodified-Since",'request' ,'1.1','date-header' ,'RFC 2616, 14.28')
,("Last-Modified"      ,'entity'  ,'1.0','date-header','RFC 2616, 14.29')
,("Location"           ,'response','1.0','singular'   ,'RFC 2616, 14.30')
,("Max-Forwards"       ,'request' ,'1.1','singular'   ,'RFC 2616, 14.31')
,("Pragma"             ,'general' ,'1.0','multi-value','RFC 2616, 14.32')
,("Proxy-Authenticate" ,'response','1.1','multi-value','RFC 2616, 14.33')
,("Proxy-Authorization",'request' ,'1.1','singular'   ,'RFC 2616, 14.34')
#,("Range"             ,'request' ,'1.1','multi-value','RFC 2616, 14.35')
,("Referer"            ,'request' ,'1.0','singular'   ,'RFC 2616, 14.36')
,("Retry-After"        ,'response','1.1','singular'   ,'RFC 2616, 14.37')
,("Server"             ,'response','1.0','singular'   ,'RFC 2616, 14.38')
,("Set-Cookie"         ,'response','1.0','multi-entry','RFC 2109/Netscape')
,("TE"                 ,'request' ,'1.1','multi-value','RFC 2616, 14.39')
,("Trailer"            ,'general' ,'1.1','multi-value','RFC 2616, 14.40')
,("Transfer-Encoding"  ,'general' ,'1.1','multi-value','RFC 2616, 14.41')
,("Upgrade"            ,'general' ,'1.1','multi-value','RFC 2616, 14.42')
,("User-Agent"         ,'request' ,'1.0','singular'   ,'RFC 2616, 14.43')
,("Vary"               ,'response','1.1','multi-value','RFC 2616, 14.44')
,("Via"                ,'general' ,'1.1','multi-value','RFC 2616, 14.45')
,("Warning"            ,'general' ,'1.1','multi-entry','RFC 2616, 14.46')
,("WWW-Authenticate"   ,'response','1.0','multi-entry','RFC 2616, 14.47')):
    # map the declared style onto the corresponding header flavor
    klass = {'multi-value': _MultiValueHeader,
             'multi-entry': _MultiEntryHeader,
             'date-header': _DateHeader,
             'singular' : _SingleValueHeader}[style]
    # construction registers the header; also stamp its docstring
    klass(name, category, comment, version).__doc__ = comment
    del klass
+
# Export each registered header under an upper-case, underscored
# module constant (e.g. CACHE_CONTROL, SET_COOKIE) and publish it
# through __all__.
for head in _headers.values():
    headname = head.name.replace("-","_").upper()
    locals()[headname] = head
    __all__.append(headname)
+
# For the ``pudge`` documentation tool: document the HTTPHeader
# subclasses as well as the public names in __all__.
__pudge_all__ = __all__[:]
for _name, _obj in globals().items():
    if isinstance(_obj, type) and issubclass(_obj, HTTPHeader):
        __pudge_all__.append(_name)
diff --git a/paste/httpserver.py b/paste/httpserver.py
new file mode 100755
index 0000000..cd21713
--- /dev/null
+++ b/paste/httpserver.py
@@ -0,0 +1,1410 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+WSGI HTTP Server
+
+This is a minimalistic WSGI server using Python's built-in BaseHTTPServer;
+if pyOpenSSL is installed, it also provides SSL capabilities.
+"""
+
+# @@: add in protection against HTTP/1.0 clients who claim to
+# be 1.1 but do not send a Content-Length
+
+# @@: add support for chunked encoding, this is not a 1.1 server
+# till this is completed.
+
+import atexit
+import traceback
+import socket, sys, threading, urlparse, Queue, urllib
+import posixpath
+import time
+import thread
+import os
+from itertools import count
+from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+from SocketServer import ThreadingMixIn
+from paste.util import converters
+import logging
+try:
+ from paste.util import killthread
+except ImportError:
+ # Not available, probably no ctypes
+ killthread = None
+
+__all__ = ['WSGIHandlerMixin', 'WSGIServer', 'WSGIHandler', 'serve']
+__version__ = "0.5"
+
class ContinueHook(object):
    """
    When a client request includes a 'Expect: 100-continue' header, then
    it is the responsibility of the server to send 100 Continue when it
    is ready for the content body. This allows authentication, access
    levels, and other exceptions to be detected *before* bandwidth is
    spent on the request body.

    This is a rfile wrapper that implements this functionality by
    sending 100 Continue to the client immediately after the user
    requests the content via a read() operation on the rfile stream.
    After this response is sent, it becomes a pass-through object.
    """

    def __init__(self, rfile, write):
        # ``write`` is the raw response writer used to emit the interim
        # 100 Continue (typically the handler's ``wfile.write``)
        self._ContinueFile_rfile = rfile
        self._ContinueFile_write = write
        # mirror the wrapped file's passive attributes directly
        for attr in ('close', 'closed', 'fileno', 'flush',
                     'mode', 'bufsize', 'softspace'):
            if hasattr(rfile, attr):
                setattr(self, attr, getattr(rfile, attr))
        # route the read methods through the one-shot wrappers below
        for attr in ('read', 'readline', 'readlines'):
            if hasattr(rfile, attr):
                setattr(self, attr, getattr(self, '_ContinueFile_' + attr))

    def _ContinueFile_send(self):
        # emit the interim response once, then become a pure
        # pass-through by rebinding the read methods to the real file
        self._ContinueFile_write("HTTP/1.1 100 Continue\r\n\r\n")
        rfile = self._ContinueFile_rfile
        for attr in ('read', 'readline', 'readlines'):
            if hasattr(rfile, attr):
                setattr(self, attr, getattr(rfile, attr))

    def _ContinueFile_read(self, size=-1):
        self._ContinueFile_send()
        return self._ContinueFile_rfile.read(size)

    def _ContinueFile_readline(self, size=-1):
        self._ContinueFile_send()
        return self._ContinueFile_rfile.readline(size)

    def _ContinueFile_readlines(self, sizehint=0):
        self._ContinueFile_send()
        return self._ContinueFile_rfile.readlines(sizehint)
+
class WSGIHandlerMixin:
    """
    WSGI mix-in for HTTPRequestHandler

    This class is a mix-in to provide WSGI functionality to any
    HTTPRequestHandler derivative (as provided in Python's BaseHTTPServer).
    This assumes a ``wsgi_application`` handler on ``self.server``.
    """
    # when true, attempt a reverse-DNS lookup for REMOTE_HOST
    lookup_addresses = True

    def log_request(self, *args, **kwargs):
        """ disable success request logging

        Logging transactions should not be part of a WSGI server,
        if you want logging; look at paste.translogger
        """
        pass

    def log_message(self, *args, **kwargs):
        """ disable error message logging

        Logging transactions should not be part of a WSGI server,
        if you want logging; look at paste.translogger
        """
        pass

    def version_string(self):
        """ behavior that BaseHTTPServer should have had """
        if not self.sys_version:
            return self.server_version
        else:
            return self.server_version + ' ' + self.sys_version

    def wsgi_write_chunk(self, chunk):
        """
        Write a chunk of the output stream; send headers if they
        have not already been sent.
        """
        if not self.wsgi_headers_sent and not self.wsgi_curr_headers:
            raise RuntimeError(
                "Content returned before start_response called")
        if not self.wsgi_headers_sent:
            self.wsgi_headers_sent = True
            (status, headers) = self.wsgi_curr_headers
            code, message = status.split(" ", 1)
            self.send_response(int(code), message)
            #
            # HTTP/1.1 compliance; either send Content-Length or
            # signal that the connection is being closed.
            #
            send_close = True
            for (k, v) in headers:
                lk = k.lower()
                if 'content-length' == lk:
                    send_close = False
                if 'connection' == lk:
                    if 'close' == v.lower():
                        self.close_connection = 1
                        send_close = False
                self.send_header(k, v)
            if send_close:
                self.close_connection = 1
                self.send_header('Connection', 'close')

            self.end_headers()
        self.wfile.write(chunk)

    def wsgi_start_response(self, status, response_headers, exc_info=None):
        """PEP 333 ``start_response`` callable; returns the write function."""
        if exc_info:
            try:
                if self.wsgi_headers_sent:
                    # headers already on the wire: re-raise so the error
                    # terminates the response (as PEP 333 requires)
                    raise exc_info[0], exc_info[1], exc_info[2]
                else:
                    # In this case, we're going to assume that the
                    # higher-level code is currently handling the
                    # issue and returning a reasonable response.
                    # self.log_error(repr(exc_info))
                    pass
            finally:
                exc_info = None
        elif self.wsgi_curr_headers:
            assert 0, "Attempt to set headers a second time w/o an exc_info"
        self.wsgi_curr_headers = (status, response_headers)
        return self.wsgi_write_chunk

    def wsgi_setup(self, environ=None):
        """
        Setup the member variables used by this WSGI mixin, including
        the ``environ`` and status member variables.

        After the basic environment is created; the optional ``environ``
        argument can be used to override any settings.
        """

        (scheme, netloc, path, query, fragment) = urlparse.urlsplit(self.path)
        path = urllib.unquote(path)
        endslash = path.endswith('/')
        path = posixpath.normpath(path)
        if endslash and path != '/':
            # Put the slash back...
            path += '/'
        (server_name, server_port) = self.server.server_address[:2]

        rfile = self.rfile
        # We can put in the protection to keep from over-reading the
        # file
        try:
            content_length = int(self.headers.get('Content-Length', '0'))
        except ValueError:
            content_length = 0
        if '100-continue' == self.headers.get('Expect','').lower():
            rfile = LimitedLengthFile(ContinueHook(rfile, self.wfile.write), content_length)
        else:
            if not hasattr(self.connection, 'get_context'):
                # @@: LimitedLengthFile is currently broken in connection
                # with SSL (sporadic errors that are difficult to trace, but
                # ones that go away when you don't use LimitedLengthFile)
                rfile = LimitedLengthFile(rfile, content_length)

        remote_address = self.client_address[0]
        self.wsgi_environ = {
            'wsgi.version': (1,0)
            ,'wsgi.url_scheme': 'http'
            ,'wsgi.input': rfile
            ,'wsgi.errors': sys.stderr
            ,'wsgi.multithread': True
            ,'wsgi.multiprocess': False
            ,'wsgi.run_once': False
            # CGI variables required by PEP-333
            ,'REQUEST_METHOD': self.command
            ,'SCRIPT_NAME': '' # application is root of server
            ,'PATH_INFO': path
            ,'QUERY_STRING': query
            ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
            ,'CONTENT_LENGTH': self.headers.get('Content-Length', '0')
            ,'SERVER_NAME': server_name
            ,'SERVER_PORT': str(server_port)
            ,'SERVER_PROTOCOL': self.request_version
            # CGI not required by PEP-333
            ,'REMOTE_ADDR': remote_address
            }
        if scheme:
            self.wsgi_environ['paste.httpserver.proxy.scheme'] = scheme
        if netloc:
            self.wsgi_environ['paste.httpserver.proxy.host'] = netloc

        if self.lookup_addresses:
            # @@: make lookup_addresses actually work; at this point
            # address_string() is overridden further down in this file
            # and hence is a noop
            if remote_address.startswith("192.168.") \
               or remote_address.startswith("10.") \
               or remote_address.startswith("172.16."):
                # private (RFC 1918) address: skip the reverse lookup
                pass
            else:
                address_string = None # self.address_string()
                if address_string:
                    self.wsgi_environ['REMOTE_HOST'] = address_string

        if hasattr(self.server, 'thread_pool'):
            # Now that we know what the request was for, we should
            # tell the thread pool what its worker is working on
            self.server.thread_pool.worker_tracker[thread.get_ident()][1] = self.wsgi_environ
            self.wsgi_environ['paste.httpserver.thread_pool'] = self.server.thread_pool

        for k, v in self.headers.items():
            key = 'HTTP_' + k.replace("-","_").upper()
            if key in ('HTTP_CONTENT_TYPE','HTTP_CONTENT_LENGTH'):
                # already provided by the CGI-style variables above
                continue
            self.wsgi_environ[key] = ','.join(self.headers.getheaders(k))

        if hasattr(self.connection,'get_context'):
            # pyOpenSSL connection: advertise the request as https
            self.wsgi_environ['wsgi.url_scheme'] = 'https'
            # @@: extract other SSL parameters from pyOpenSSL at...
            # http://www.modssl.org/docs/2.8/ssl_reference.html#ToC25

        if environ:
            assert isinstance(environ, dict)
            self.wsgi_environ.update(environ)
            if 'on' == environ.get('HTTPS'):
                self.wsgi_environ['wsgi.url_scheme'] = 'https'

        self.wsgi_curr_headers = None
        self.wsgi_headers_sent = False

    def wsgi_connection_drop(self, exce, environ=None):
        """
        Override this if you're interested in socket exceptions, such
        as when the user clicks 'Cancel' during a file download.
        """
        pass

    def wsgi_execute(self, environ=None):
        """
        Invoke the server's ``wsgi_application``.
        """

        self.wsgi_setup(environ)

        try:
            result = self.server.wsgi_application(self.wsgi_environ,
                                                  self.wsgi_start_response)
            try:
                for chunk in result:
                    self.wsgi_write_chunk(chunk)
                if not self.wsgi_headers_sent:
                    # empty body: still flush the status and headers
                    self.wsgi_write_chunk('')
            finally:
                if hasattr(result,'close'):
                    result.close()
                result = None
        except socket.error, exce:
            self.wsgi_connection_drop(exce, environ)
            return
        except:
            if not self.wsgi_headers_sent:
                error_msg = "Internal Server Error\n"
                self.wsgi_curr_headers = (
                    '500 Internal Server Error',
                    [('Content-type', 'text/plain'),
                     ('Content-length', str(len(error_msg)))])
                self.wsgi_write_chunk("Internal Server Error\n")
            raise
+
+#
+# SSL Functionality
+#
+# This implementation was motivated by Sebastien Martini's SSL example
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
+#
try:
    from OpenSSL import SSL, tsafe
    SocketErrors = (socket.error, SSL.ZeroReturnError, SSL.SysCallError)
except ImportError:
    # Do not require pyOpenSSL to be installed, but disable SSL
    # functionality in that case.
    SSL = None
    SocketErrors = (socket.error,)
    class SecureHTTPServer(HTTPServer):
        # stub with the same constructor signature as the SSL-capable
        # variant below; rejects any actual ssl_context
        def __init__(self, server_address, RequestHandlerClass,
                     ssl_context=None, request_queue_size=None):
            assert not ssl_context, "pyOpenSSL not installed"
            HTTPServer.__init__(self, server_address, RequestHandlerClass)
            if request_queue_size:
                self.socket.listen(request_queue_size)
else:

    class _ConnFixer(object):
        """ wraps a socket connection so it implements makefile """
        def __init__(self, conn):
            self.__conn = conn
        def makefile(self, mode, bufsize):
            return socket._fileobject(self.__conn, mode, bufsize)
        def __getattr__(self, attrib):
            # delegate everything else to the wrapped connection
            return getattr(self.__conn, attrib)

    class SecureHTTPServer(HTTPServer):
        """
        Provides SSL server functionality on top of the BaseHTTPServer
        by overriding _private_ members of Python's standard
        distribution. The interface for this instance only changes by
        adding an optional ssl_context attribute to the constructor:

            cntx = SSL.Context(SSL.SSLv23_METHOD)
            cntx.use_privatekey_file("host.pem")
            cntx.use_certificate_file("host.pem")

        """

        def __init__(self, server_address, RequestHandlerClass,
                     ssl_context=None, request_queue_size=None):
            # This overrides the implementation of __init__ in python's
            # SocketServer.TCPServer (which BaseHTTPServer.HTTPServer
            # does not override, thankfully).
            HTTPServer.__init__(self, server_address, RequestHandlerClass)
            self.socket = socket.socket(self.address_family,
                                        self.socket_type)
            self.ssl_context = ssl_context
            if ssl_context:
                # tsafe.Connection serializes SSL operations behind a
                # lock but lacks the timeout accessors; add lock-held
                # versions of them here
                class TSafeConnection(tsafe.Connection):
                    def settimeout(self, *args):
                        self._lock.acquire()
                        try:
                            return self._ssl_conn.settimeout(*args)
                        finally:
                            self._lock.release()
                    def gettimeout(self):
                        self._lock.acquire()
                        try:
                            return self._ssl_conn.gettimeout()
                        finally:
                            self._lock.release()
                self.socket = TSafeConnection(ssl_context, self.socket)
            self.server_bind()
            if request_queue_size:
                self.socket.listen(request_queue_size)
            self.server_activate()

        def get_request(self):
            # The default SSL request object does not seem to have a
            # ``makefile(mode, bufsize)`` method as expected by
            # Socketserver.StreamRequestHandler.
            (conn, info) = self.socket.accept()
            if self.ssl_context:
                conn = _ConnFixer(conn)
            return (conn, info)

    def _auto_ssl_context():
        # Generate a throw-away self-signed certificate and key pair so
        # a server can come up over SSL with no provisioning at all.
        import OpenSSL, time, random
        pkey = OpenSSL.crypto.PKey()
        pkey.generate_key(OpenSSL.crypto.TYPE_RSA, 768)

        cert = OpenSSL.crypto.X509()

        cert.set_serial_number(random.randint(0, sys.maxint))
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
        cert.get_subject().CN = '*'
        cert.get_subject().O = 'Dummy Certificate'
        cert.get_issuer().CN = 'Untrusted Authority'
        cert.get_issuer().O = 'Self-Signed'
        cert.set_pubkey(pkey)
        cert.sign(pkey, 'md5')

        ctx = SSL.Context(SSL.SSLv23_METHOD)
        ctx.use_privatekey(pkey)
        ctx.use_certificate(cert)

        return ctx
+
class WSGIHandler(WSGIHandlerMixin, BaseHTTPRequestHandler):
    """
    A WSGI handler that overrides POST, GET and HEAD to delegate
    requests to the server's ``wsgi_application``.
    """
    server_version = 'PasteWSGIServer/' + __version__

    def handle_one_request(self):
        """Handle a single HTTP request.

        You normally don't need to override this method; see the class
        __doc__ string for information on how to handle specific HTTP
        commands such as GET and POST.

        """
        self.raw_requestline = self.rfile.readline()
        if not self.raw_requestline:
            # Empty read: the client closed the connection without
            # sending a request line; stop keep-alive processing.
            self.close_connection = 1
            return
        if not self.parse_request(): # An error code has been sent, just exit
            return
        # Delegate to WSGIHandlerMixin, which runs the WSGI application.
        self.wsgi_execute()

    def handle(self):
        # don't bother logging disconnects while handling a request
        try:
            BaseHTTPRequestHandler.handle(self)
        except SocketErrors, exce:
            self.wsgi_connection_drop(exce)

    def address_string(self):
        """Return the client address formatted for logging.

        This is overridden so that no hostname lookup is done.
        """
        return ''
+
class LimitedLengthFile(object):
    """
    File-like wrapper that exposes at most ``length`` bytes of ``file``.

    Used to wrap ``wsgi.input`` so an application cannot read past
    Content-Length and consume data belonging to a following pipelined
    request.
    """

    def __init__(self, file, length):
        self.file = file
        self.length = length
        # Bytes handed out so far; never allowed to exceed ``length``.
        self._consumed = 0
        # Only expose seek() when the underlying file supports it.
        if hasattr(self.file, 'seek'):
            self.seek = self._seek

    def __repr__(self):
        base_repr = repr(self.file)
        return base_repr[:-1] + ' length=%s>' % self.length

    def read(self, length=None):
        """Read up to ``length`` bytes, capped at what remains."""
        left = self.length - self._consumed
        if length is None:
            length = left
        else:
            length = min(length, left)
        # next two lines are necessary only if read(0) blocks
        if not left:
            return ''
        data = self.file.read(length)
        self._consumed += len(data)
        return data

    def readline(self, *args):
        """Read one line, never past the remaining byte budget."""
        max_read = self.length - self._consumed
        if len(args):
            max_read = min(args[0], max_read)
        data = self.file.readline(max_read)
        self._consumed += len(data)
        return data

    def readlines(self, hint=None):
        """
        Read remaining lines, honoring the length limit.

        BUG FIX: the original delegated to ``file.readlines()``, which
        ignored ``length`` entirely -- it could read past the limit and
        push ``_consumed`` beyond ``length``, corrupting keep-alive
        request pipelining.  Reading through ``readline()`` enforces
        the cap.  ``hint`` remains advisory, as with ``file.readlines``.
        """
        lines = []
        total = 0
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
            total += len(line)
            # Stop once the accumulated size reaches a positive hint.
            if hint is not None and hint > 0 and total >= hint:
                break
        return lines

    def __iter__(self):
        return self

    def next(self):
        if self.length - self._consumed <= 0:
            raise StopIteration
        return self.readline()

    # Python 3 iterator protocol alias (harmless under Python 2).
    __next__ = next

    ## Optional methods ##

    def _seek(self, place):
        # Installed as ``seek`` in __init__ only when the wrapped file
        # is seekable.
        self.file.seek(place)
        self._consumed = place

    def tell(self):
        if hasattr(self.file, 'tell'):
            return self.file.tell()
        else:
            # Fall back to our own byte count for non-seekable files.
            return self._consumed
+
+class ThreadPool(object):
+ """
+ Generic thread pool with a queue of callables to consume.
+
+ Keeps a notion of the status of its worker threads:
+
+ idle: worker thread with nothing to do
+
+ busy: worker thread doing its job
+
+ hung: worker thread that's been doing a job for too long
+
+ dying: a hung thread that has been killed, but hasn't died quite
+ yet.
+
+ zombie: what was a worker thread that we've tried to kill but
+ isn't dead yet.
+
+ At any time you can call track_threads, to get a dictionary with
+ these keys and lists of thread_ids that fall in that status. All
+ keys will be present, even if they point to emty lists.
+
+ hung threads are threads that have been busy more than
+ hung_thread_limit seconds. Hung threads are killed when they live
+ longer than kill_thread_limit seconds. A thread is then
+ considered dying for dying_limit seconds, if it is still alive
+ after that it is considered a zombie.
+
+ When there are no idle workers and a request comes in, another
+ worker *may* be spawned. If there are less than spawn_if_under
+ threads in the busy state, another thread will be spawned. So if
+ the limit is 5, and there are 4 hung threads and 6 busy threads,
+ no thread will be spawned.
+
+ When there are more than max_zombie_threads_before_die zombie
+ threads, a SystemExit exception will be raised, stopping the
+ server. Use 0 or None to never raise this exception. Zombie
+ threads *should* get cleaned up, but killing threads is no
+ necessarily reliable. This is turned off by default, since it is
+ only a good idea if you've deployed the server with some process
+ watching from above (something similar to daemontools or zdaemon).
+
+ Each worker thread only processes ``max_requests`` tasks before it
+ dies and replaces itself with a new worker thread.
+ """
+
+
+ SHUTDOWN = object()
+
+ def __init__(
+ self, nworkers, name="ThreadPool", daemon=False,
+ max_requests=100, # threads are killed after this many requests
+ hung_thread_limit=30, # when a thread is marked "hung"
+ kill_thread_limit=1800, # when you kill that hung thread
+ dying_limit=300, # seconds that a kill should take to go into effect (longer than this and the thread is a "zombie")
+ spawn_if_under=5, # spawn if there's too many hung threads
+ max_zombie_threads_before_die=0, # when to give up on the process
+ hung_check_period=100, # every 100 requests check for hung workers
+ logger=None, # Place to log messages to
+ error_email=None, # Person(s) to notify if serious problem occurs
+ ):
+ """
+ Create thread pool with `nworkers` worker threads.
+ """
+ self.nworkers = nworkers
+ self.max_requests = max_requests
+ self.name = name
+ self.queue = Queue.Queue()
+ self.workers = []
+ self.daemon = daemon
+ if logger is None:
+ logger = logging.getLogger('paste.httpserver.ThreadPool')
+ if isinstance(logger, basestring):
+ logger = logging.getLogger(logger)
+ self.logger = logger
+ self.error_email = error_email
+ self._worker_count = count()
+
+ assert (not kill_thread_limit
+ or kill_thread_limit >= hung_thread_limit), (
+ "kill_thread_limit (%s) should be higher than hung_thread_limit (%s)"
+ % (kill_thread_limit, hung_thread_limit))
+ if not killthread:
+ kill_thread_limit = 0
+ self.logger.info(
+ "Cannot use kill_thread_limit as ctypes/killthread is not available")
+ self.kill_thread_limit = kill_thread_limit
+ self.dying_limit = dying_limit
+ self.hung_thread_limit = hung_thread_limit
+ assert spawn_if_under <= nworkers, (
+ "spawn_if_under (%s) should be less than nworkers (%s)"
+ % (spawn_if_under, nworkers))
+ self.spawn_if_under = spawn_if_under
+ self.max_zombie_threads_before_die = max_zombie_threads_before_die
+ self.hung_check_period = hung_check_period
+ self.requests_since_last_hung_check = 0
+ # Used to keep track of what worker is doing what:
+ self.worker_tracker = {}
+ # Used to keep track of the workers not doing anything:
+ self.idle_workers = []
+ # Used to keep track of threads that have been killed, but maybe aren't dead yet:
+ self.dying_threads = {}
+ # This is used to track when we last had to add idle workers;
+ # we shouldn't cull extra workers until some time has passed
+ # (hung_thread_limit) since workers were added:
+ self._last_added_new_idle_workers = 0
+ if not daemon:
+ atexit.register(self.shutdown)
+ for i in range(self.nworkers):
+ self.add_worker_thread(message='Initial worker pool')
+
+ def add_task(self, task):
+ """
+ Add a task to the queue
+ """
+ self.logger.debug('Added task (%i tasks queued)', self.queue.qsize())
+ if self.hung_check_period:
+ self.requests_since_last_hung_check += 1
+ if self.requests_since_last_hung_check > self.hung_check_period:
+ self.requests_since_last_hung_check = 0
+ self.kill_hung_threads()
+ if not self.idle_workers and self.spawn_if_under:
+ # spawn_if_under can come into effect...
+ busy = 0
+ now = time.time()
+ self.logger.debug('No idle workers for task; checking if we need to make more workers')
+ for worker in self.workers:
+ if not hasattr(worker, 'thread_id'):
+ # Not initialized
+ continue
+ time_started, info = self.worker_tracker.get(worker.thread_id,
+ (None, None))
+ if time_started is not None:
+ if now - time_started < self.hung_thread_limit:
+ busy += 1
+ if busy < self.spawn_if_under:
+ self.logger.info(
+ 'No idle tasks, and only %s busy tasks; adding %s more '
+ 'workers', busy, self.spawn_if_under-busy)
+ self._last_added_new_idle_workers = time.time()
+ for i in range(self.spawn_if_under - busy):
+ self.add_worker_thread(message='Response to lack of idle workers')
+ else:
+ self.logger.debug(
+ 'No extra workers needed (%s busy workers)',
+ busy)
+ if (len(self.workers) > self.nworkers
+ and len(self.idle_workers) > 3
+ and time.time()-self._last_added_new_idle_workers > self.hung_thread_limit):
+ # We've spawned worers in the past, but they aren't needed
+ # anymore; kill off some
+ self.logger.info(
+ 'Culling %s extra workers (%s idle workers present)',
+ len(self.workers)-self.nworkers, len(self.idle_workers))
+ self.logger.debug(
+ 'Idle workers: %s', self.idle_workers)
+ for i in range(len(self.workers) - self.nworkers):
+ self.queue.put(self.SHUTDOWN)
+ self.queue.put(task)
+
+ def track_threads(self):
+ """
+ Return a dict summarizing the threads in the pool (as
+ described in the ThreadPool docstring).
+ """
+ result = dict(idle=[], busy=[], hung=[], dying=[], zombie=[])
+ now = time.time()
+ for worker in self.workers:
+ if not hasattr(worker, 'thread_id'):
+ # The worker hasn't fully started up, we should just
+ # ignore it
+ continue
+ time_started, info = self.worker_tracker.get(worker.thread_id,
+ (None, None))
+ if time_started is not None:
+ if now - time_started > self.hung_thread_limit:
+ result['hung'].append(worker)
+ else:
+ result['busy'].append(worker)
+ else:
+ result['idle'].append(worker)
+ for thread_id, (time_killed, worker) in self.dying_threads.items():
+ if not self.thread_exists(thread_id):
+ # Cull dying threads that are actually dead and gone
+ self.logger.info('Killed thread %s no longer around',
+ thread_id)
+ try:
+ del self.dying_threads[thread_id]
+ except KeyError:
+ pass
+ continue
+ if now - time_killed > self.dying_limit:
+ result['zombie'].append(worker)
+ else:
+ result['dying'].append(worker)
+ return result
+
+ def kill_worker(self, thread_id):
+ """
+ Removes the worker with the given thread_id from the pool, and
+ replaces it with a new worker thread.
+
+ This should only be done for mis-behaving workers.
+ """
+ if killthread is None:
+ raise RuntimeError(
+ "Cannot kill worker; killthread/ctypes not available")
+ thread_obj = threading._active.get(thread_id)
+ killthread.async_raise(thread_id, SystemExit)
+ try:
+ del self.worker_tracker[thread_id]
+ except KeyError:
+ pass
+ self.logger.info('Killing thread %s', thread_id)
+ if thread_obj in self.workers:
+ self.workers.remove(thread_obj)
+ self.dying_threads[thread_id] = (time.time(), thread_obj)
+ self.add_worker_thread(message='Replacement for killed thread %s' % thread_id)
+
+ def thread_exists(self, thread_id):
+ """
+ Returns true if a thread with this id is still running
+ """
+ return thread_id in threading._active
+
+ def add_worker_thread(self, *args, **kwargs):
+ index = self._worker_count.next()
+ worker = threading.Thread(target=self.worker_thread_callback,
+ args=args, kwargs=kwargs,
+ name=("worker %d" % index))
+ worker.setDaemon(self.daemon)
+ worker.start()
+
+ def kill_hung_threads(self):
+ """
+ Tries to kill any hung threads
+ """
+ if not self.kill_thread_limit:
+ # No killing should occur
+ return
+ now = time.time()
+ max_time = 0
+ total_time = 0
+ idle_workers = 0
+ starting_workers = 0
+ working_workers = 0
+ killed_workers = 0
+ for worker in self.workers:
+ if not hasattr(worker, 'thread_id'):
+ # Not setup yet
+ starting_workers += 1
+ continue
+ time_started, info = self.worker_tracker.get(worker.thread_id,
+ (None, None))
+ if time_started is None:
+ # Must be idle
+ idle_workers += 1
+ continue
+ working_workers += 1
+ max_time = max(max_time, now-time_started)
+ total_time += now-time_started
+ if now - time_started > self.kill_thread_limit:
+ self.logger.warning(
+ 'Thread %s hung (working on task for %i seconds)',
+ worker.thread_id, now - time_started)
+ try:
+ import pprint
+ info_desc = pprint.pformat(info)
+ except:
+ out = StringIO()
+ traceback.print_exc(file=out)
+ info_desc = 'Error:\n%s' % out.getvalue()
+ self.notify_problem(
+ "Killing worker thread (id=%(thread_id)s) because it has been \n"
+ "working on task for %(time)s seconds (limit is %(limit)s)\n"
+ "Info on task:\n"
+ "%(info)s"
+ % dict(thread_id=worker.thread_id,
+ time=now - time_started,
+ limit=self.kill_thread_limit,
+ info=info_desc))
+ self.kill_worker(worker.thread_id)
+ killed_workers += 1
+ if working_workers:
+ ave_time = float(total_time) / working_workers
+ ave_time = '%.2fsec' % ave_time
+ else:
+ ave_time = 'N/A'
+ self.logger.info(
+ "kill_hung_threads status: %s threads (%s working, %s idle, %s starting) "
+ "ave time %s, max time %.2fsec, killed %s workers"
+ % (idle_workers + starting_workers + working_workers,
+ working_workers, idle_workers, starting_workers,
+ ave_time, max_time, killed_workers))
+ self.check_max_zombies()
+
+ def check_max_zombies(self):
+ """
+ Check if we've reached max_zombie_threads_before_die; if so
+ then kill the entire process.
+ """
+ if not self.max_zombie_threads_before_die:
+ return
+ found = []
+ now = time.time()
+ for thread_id, (time_killed, worker) in self.dying_threads.items():
+ if not self.thread_exists(thread_id):
+ # Cull dying threads that are actually dead and gone
+ try:
+ del self.dying_threads[thread_id]
+ except KeyError:
+ pass
+ continue
+ if now - time_killed > self.dying_limit:
+ found.append(thread_id)
+ if found:
+ self.logger.info('Found %s zombie threads', found)
+ if len(found) > self.max_zombie_threads_before_die:
+ self.logger.fatal(
+ 'Exiting process because %s zombie threads is more than %s limit',
+ len(found), self.max_zombie_threads_before_die)
+ self.notify_problem(
+ "Exiting process because %(found)s zombie threads "
+ "(more than limit of %(limit)s)\n"
+ "Bad threads (ids):\n"
+ " %(ids)s\n"
+ % dict(found=len(found),
+ limit=self.max_zombie_threads_before_die,
+ ids="\n ".join(map(str, found))),
+ subject="Process restart (too many zombie threads)")
+ self.shutdown(10)
+ print 'Shutting down', threading.currentThread()
+ raise ServerExit(3)
+
+ def worker_thread_callback(self, message=None):
+ """
+ Worker thread should call this method to get and process queued
+ callables.
+ """
+ thread_obj = threading.currentThread()
+ thread_id = thread_obj.thread_id = thread.get_ident()
+ self.workers.append(thread_obj)
+ self.idle_workers.append(thread_id)
+ requests_processed = 0
+ add_replacement_worker = False
+ self.logger.debug('Started new worker %s: %s', thread_id, message)
+ try:
+ while True:
+ if self.max_requests and self.max_requests < requests_processed:
+ # Replace this thread then die
+ self.logger.debug('Thread %s processed %i requests (limit %s); stopping thread'
+ % (thread_id, requests_processed, self.max_requests))
+ add_replacement_worker = True
+ break
+ runnable = self.queue.get()
+ if runnable is ThreadPool.SHUTDOWN:
+ self.logger.debug('Worker %s asked to SHUTDOWN', thread_id)
+ break
+ try:
+ self.idle_workers.remove(thread_id)
+ except ValueError:
+ pass
+ self.worker_tracker[thread_id] = [time.time(), None]
+ requests_processed += 1
+ try:
+ try:
+ runnable()
+ except:
+ # We are later going to call sys.exc_clear(),
+ # removing all remnants of any exception, so
+ # we should log it now. But ideally no
+ # exception should reach this level
+ print >> sys.stderr, (
+ 'Unexpected exception in worker %r' % runnable)
+ traceback.print_exc()
+ if thread_id in self.dying_threads:
+ # That last exception was intended to kill me
+ break
+ finally:
+ try:
+ del self.worker_tracker[thread_id]
+ except KeyError:
+ pass
+ sys.exc_clear()
+ self.idle_workers.append(thread_id)
+ finally:
+ try:
+ del self.worker_tracker[thread_id]
+ except KeyError:
+ pass
+ try:
+ self.idle_workers.remove(thread_id)
+ except ValueError:
+ pass
+ try:
+ self.workers.remove(thread_obj)
+ except ValueError:
+ pass
+ try:
+ del self.dying_threads[thread_id]
+ except KeyError:
+ pass
+ if add_replacement_worker:
+ self.add_worker_thread(message='Voluntary replacement for thread %s' % thread_id)
+
+ def shutdown(self, force_quit_timeout=0):
+ """
+ Shutdown the queue (after finishing any pending requests).
+ """
+ self.logger.info('Shutting down threadpool')
+ # Add a shutdown request for every worker
+ for i in range(len(self.workers)):
+ self.queue.put(ThreadPool.SHUTDOWN)
+ # Wait for each thread to terminate
+ hung_workers = []
+ for worker in self.workers:
+ worker.join(0.5)
+ if worker.isAlive():
+ hung_workers.append(worker)
+ zombies = []
+ for thread_id in self.dying_threads:
+ if self.thread_exists(thread_id):
+ zombies.append(thread_id)
+ if hung_workers or zombies:
+ self.logger.info("%s workers didn't stop properly, and %s zombies",
+ len(hung_workers), len(zombies))
+ if hung_workers:
+ for worker in hung_workers:
+ self.kill_worker(worker.thread_id)
+ self.logger.info('Workers killed forcefully')
+ if force_quit_timeout:
+ hung = []
+ timed_out = False
+ need_force_quit = bool(zombies)
+ for workers in self.workers:
+ if not timed_out and worker.isAlive():
+ timed_out = True
+ worker.join(force_quit_timeout)
+ if worker.isAlive():
+ print "Worker %s won't die" % worker
+ need_force_quit = True
+ if need_force_quit:
+ import atexit
+ # Remove the threading atexit callback
+ for callback in list(atexit._exithandlers):
+ func = getattr(callback[0], 'im_func', None)
+ if not func:
+ continue
+ globs = getattr(func, 'func_globals', {})
+ mod = globs.get('__name__')
+ if mod == 'threading':
+ atexit._exithandlers.remove(callback)
+ atexit._run_exitfuncs()
+ print 'Forcefully exiting process'
+ os._exit(3)
+ else:
+ self.logger.info('All workers eventually killed')
+ else:
+ self.logger.info('All workers stopped')
+
+ def notify_problem(self, msg, subject=None, spawn_thread=True):
+ """
+ Called when there's a substantial problem. msg contains the
+ body of the notification, subject the summary.
+
+ If spawn_thread is true, then the email will be send in
+ another thread (so this doesn't block).
+ """
+ if not self.error_email:
+ return
+ if spawn_thread:
+ t = threading.Thread(
+ target=self.notify_problem,
+ args=(msg, subject, False))
+ t.start()
+ return
+ from_address = 'errors@localhost'
+ if not subject:
+ subject = msg.strip().splitlines()[0]
+ subject = subject[:50]
+ subject = '[http threadpool] %s' % subject
+ headers = [
+ "To: %s" % self.error_email,
+ "From: %s" % from_address,
+ "Subject: %s" % subject,
+ ]
+ try:
+ system = ' '.join(os.uname())
+ except:
+ system = '(unknown)'
+ body = (
+ "An error has occurred in the paste.httpserver.ThreadPool\n"
+ "Error:\n"
+ " %(msg)s\n"
+ "Occurred at: %(time)s\n"
+ "PID: %(pid)s\n"
+ "System: %(system)s\n"
+ "Server .py file: %(file)s\n"
+ % dict(msg=msg,
+ time=time.strftime("%c"),
+ pid=os.getpid(),
+ system=system,
+ file=os.path.abspath(__file__),
+ ))
+ message = '\n'.join(headers) + "\n\n" + body
+ import smtplib
+ server = smtplib.SMTP('localhost')
+ error_emails = [
+ e.strip() for e in self.error_email.split(",")
+ if e.strip()]
+ server.sendmail(from_address, error_emails, message)
+ server.quit()
+ print 'email sent to', error_emails, message
+
class ThreadPoolMixIn(object):
    """
    Mix-in class to process requests from a thread pool
    """
    def __init__(self, nworkers, daemon=False, **threadpool_options):
        # Create and start the workers
        self.running = True
        assert nworkers > 0, "ThreadPoolMixIn servers must have at least one worker"
        self.thread_pool = ThreadPool(
            nworkers,
            "ThreadPoolMixIn HTTP server on %s:%d"
            % (self.server_name, self.server_port),
            daemon,
            **threadpool_options)

    def process_request(self, request, client_address):
        """
        Queue the request to be processed by one of the thread pool threads
        """
        # This sets the socket to blocking mode (and no timeout) since it
        # may take the thread pool a little while to get back to it. (This
        # is the default but since we set a timeout on the parent socket so
        # that we can trap interrupts we need to restore this.)
        request.setblocking(1)
        # Queue processing of the request
        self.thread_pool.add_task(
            lambda: self.process_request_in_thread(request, client_address))

    def handle_error(self, request, client_address):
        exc_class, exc, tb = sys.exc_info()
        if exc_class is ServerExit:
            # This is actually a request to stop the server
            raise
        return super(ThreadPoolMixIn, self).handle_error(request, client_address)

    def process_request_in_thread(self, request, client_address):
        """
        The worker thread should call back here to do the rest of the
        request processing.  Error handling normally done in 'handle_request'
        must be done here.
        """
        try:
            self.finish_request(request, client_address)
            self.close_request(request)
        except:
            self.handle_error(request, client_address)
            self.close_request(request)
            # Re-raise fatal exceptions so they aren't silently swallowed.
            exc = sys.exc_info()[1]
            if isinstance(exc, (MemoryError, KeyboardInterrupt)):
                raise

    def serve_forever(self):
        """
        Overrides `serve_forever` to shut the threadpool down cleanly.
        """
        try:
            while self.running:
                try:
                    self.handle_request()
                except socket.timeout:
                    # Timeout is expected, gives interrupts a chance to
                    # propagate, just keep handling
                    pass
        finally:
            self.thread_pool.shutdown()

    def server_activate(self):
        """
        Overrides server_activate to set timeout on our listener socket.
        """
        # We set the timeout here so that we can trap interrupts on windows
        self.socket.settimeout(1)

    def server_close(self):
        """
        Finish pending requests and shutdown the server.
        """
        self.running = False
        self.socket.close()
        self.thread_pool.shutdown(60)
+
class WSGIServerBase(SecureHTTPServer):
    """
    Base server holding the WSGI application and an optional per-client
    socket timeout; concrete servers mix in a dispatch strategy.
    """

    def __init__(self, wsgi_application, server_address,
                 RequestHandlerClass=None, ssl_context=None,
                 request_queue_size=None):
        SecureHTTPServer.__init__(self, server_address,
                                  RequestHandlerClass, ssl_context,
                                  request_queue_size=request_queue_size)
        self.wsgi_application = wsgi_application
        self.wsgi_socket_timeout = None

    def get_request(self):
        """Accept a connection, applying wsgi_socket_timeout if set."""
        connection, address = SecureHTTPServer.get_request(self)
        if self.wsgi_socket_timeout:
            connection.settimeout(self.wsgi_socket_timeout)
        return (connection, address)
+
class WSGIServer(ThreadingMixIn, WSGIServerBase):
    # Thread-per-request flavor: ThreadingMixIn spawns a new thread for
    # every connection.  daemon_threads=False means in-flight requests
    # keep the process alive until they finish.
    daemon_threads = False
+
class WSGIThreadPoolServer(ThreadPoolMixIn, WSGIServerBase):
    """
    WSGI server that handles requests with a fixed ThreadPool rather
    than one thread per connection.
    """
    def __init__(self, wsgi_application, server_address,
                 RequestHandlerClass=None, ssl_context=None,
                 nworkers=10, daemon_threads=False,
                 threadpool_options=None, request_queue_size=None):
        # Bind the socket first: ThreadPoolMixIn.__init__ reads
        # self.server_name/server_port, which server binding sets up.
        WSGIServerBase.__init__(self, wsgi_application, server_address,
                                RequestHandlerClass, ssl_context,
                                request_queue_size=request_queue_size)
        if threadpool_options is None:
            threadpool_options = {}
        ThreadPoolMixIn.__init__(self, nworkers, daemon_threads,
                                 **threadpool_options)
+
class ServerExit(SystemExit):
    """
    Raised to tell the server to really exit (SystemExit is normally
    caught).  See ThreadPoolMixIn.handle_error, which re-raises it
    instead of logging it like other errors.
    """
+
def serve(application, host=None, port=None, handler=None, ssl_pem=None,
          ssl_context=None, server_version=None, protocol_version=None,
          start_loop=True, daemon_threads=None, socket_timeout=None,
          use_threadpool=None, threadpool_workers=10,
          threadpool_options=None, request_queue_size=5):
    """
    Serves your ``application`` over HTTP(S) via WSGI interface

    ``host``

        This is the ipaddress to bind to (or a hostname if your
        nameserver is properly configured).  This defaults to
        127.0.0.1, which is not a public interface.

    ``port``

        The port to run on, defaults to 8080 for HTTP, or 4443 for
        HTTPS.  This can be a string or an integer value.

    ``handler``

        This is the HTTP request handler to use, it defaults to
        ``WSGIHandler`` in this module.

    ``ssl_pem``

        This is an optional SSL certificate file (via OpenSSL).  You can
        supply ``*`` and a development-only certificate will be
        created for you, or you can generate a self-signed test PEM
        certificate file as follows::

            $ openssl genrsa 1024 > host.key
            $ chmod 400 host.key
            $ openssl req -new -x509 -nodes -sha1 -days 365  \\
                          -key host.key > host.cert
            $ cat host.cert host.key > host.pem
            $ chmod 400 host.pem

    ``ssl_context``

        This is an optional SSL context object for the server.  A SSL
        context will be automatically constructed for you if you supply
        ``ssl_pem``.  Supply this to use a context of your own
        construction.

    ``server_version``

        The version of the server as reported in HTTP response line.  This
        defaults to something like "PasteWSGIServer/0.5".  Many servers
        hide their code-base identity with a name like 'Amnesiac/1.0'

    ``protocol_version``

        This sets the protocol used by the server, by default
        ``HTTP/1.0``.  There is some support for ``HTTP/1.1``, which
        defaults to nicer keep-alive connections.  This server supports
        ``100 Continue``, but does not yet support HTTP/1.1 Chunked
        Encoding.  Hence, if you use HTTP/1.1, you're somewhat in error
        since chunked coding is a mandatory requirement of a HTTP/1.1
        server.  If you specify HTTP/1.1, every response *must* have a
        ``Content-Length`` and you must be careful not to read past the
        end of the socket.

    ``start_loop``

        This specifies if the server loop (aka ``server.serve_forever()``)
        should be called; it defaults to ``True``.

    ``daemon_threads``

        This flag specifies if when your webserver terminates all
        in-progress client connections should be dropped.  It defaults
        to ``False``.   You might want to set this to ``True`` if you
        are using ``HTTP/1.1`` and don't set a ``socket_timeout``.

    ``socket_timeout``

        This specifies the maximum amount of time that a connection to a
        given client will be kept open.  At this time, it is a rude
        disconnect, but at a later time it might follow the RFC a bit
        more closely.

    ``use_threadpool``

        Serve requests from a pool of worker threads (``threadpool_workers``)
        rather than creating a new thread for each request.  This can
        substantially reduce latency since there is a high cost associated
        with thread creation.

    ``threadpool_workers``

        Number of worker threads to create when ``use_threadpool`` is true.  This
        can be a string or an integer value.

    ``threadpool_options``

        A dictionary of options to be used when instantiating the
        threadpool.  See paste.httpserver.ThreadPool for specific
        options (``threadpool_workers`` is a specific option that can
        also go here).

    ``request_queue_size``

        The 'backlog' argument to socket.listen(); specifies the
        maximum number of queued connections.

    """
    is_ssl = False
    if ssl_pem or ssl_context:
        assert SSL, "pyOpenSSL is not installed"
        is_ssl = True
        port = int(port or 4443)
        if not ssl_context:
            if ssl_pem == '*':
                # '*' requests a throwaway development certificate.
                ssl_context = _auto_ssl_context()
            else:
                ssl_context = SSL.Context(SSL.SSLv23_METHOD)
                ssl_context.use_privatekey_file(ssl_pem)
                ssl_context.use_certificate_chain_file(ssl_pem)

    host = host or '127.0.0.1'
    if port is None:
        # Allow "host:port" in the host argument when no port is given.
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            port = 8080
    server_address = (host, int(port))

    if not handler:
        handler = WSGIHandler
    if server_version:
        # NOTE: these assignments mutate the handler *class*, affecting
        # every server using it in this process.
        handler.server_version = server_version
        handler.sys_version = None
    if protocol_version:
        assert protocol_version in ('HTTP/0.9', 'HTTP/1.0', 'HTTP/1.1')
        handler.protocol_version = protocol_version

    if use_threadpool is None:
        use_threadpool = True

    if converters.asbool(use_threadpool):
        server = WSGIThreadPoolServer(application, server_address, handler,
                                      ssl_context, int(threadpool_workers),
                                      daemon_threads,
                                      threadpool_options=threadpool_options,
                                      request_queue_size=request_queue_size)
    else:
        server = WSGIServer(application, server_address, handler, ssl_context,
                            request_queue_size=request_queue_size)
        if daemon_threads:
            server.daemon_threads = daemon_threads

    if socket_timeout:
        server.wsgi_socket_timeout = int(socket_timeout)

    if converters.asbool(start_loop):
        protocol = is_ssl and 'https' or 'http'
        host, port = server.server_address[:2]
        if host == '0.0.0.0':
            print 'serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' % \
                (port, protocol, port)
        else:
            print "serving on %s://%s:%s" % (protocol, host, port)
        try:
            server.serve_forever()
        except KeyboardInterrupt:
            # allow CTRL+C to shutdown
            pass
    return server
+
+# For paste.deploy server instantiation (egg:Paste#http)
+# Note: this gets a separate function because it has to expect string
+# arguments (though that's not much of an issue yet, ever?)
def server_runner(wsgi_app, global_conf, **kwargs):
    """
    paste.deploy entry point (egg:Paste#http) wrapping :func:`serve`.

    All keyword values arrive as strings from the config file, so
    integer- and boolean-valued options are coerced here.  Options named
    ``threadpool_*`` (other than ``threadpool_workers``) are collected
    into the ``threadpool_options`` dict handed to ``serve``.
    """
    from paste.deploy.converters import asbool
    for name in ['port', 'socket_timeout', 'threadpool_workers',
                 'threadpool_hung_thread_limit',
                 'threadpool_kill_thread_limit',
                 'threadpool_dying_limit', 'threadpool_spawn_if_under',
                 'threadpool_max_zombie_threads_before_die',
                 'threadpool_hung_check_period',
                 'threadpool_max_requests', 'request_queue_size']:
        if name in kwargs:
            kwargs[name] = int(kwargs[name])
    for name in ['use_threadpool', 'daemon_threads']:
        if name in kwargs:
            kwargs[name] = asbool(kwargs[name])
    threadpool_options = {}
    # Iterate over a snapshot: we delete keys while looping.  The
    # original iterated kwargs.items() directly, which is only
    # accidentally safe in Python 2 (items() returns a list there).
    for name, value in list(kwargs.items()):
        if name.startswith('threadpool_') and name != 'threadpool_workers':
            threadpool_options[name[len('threadpool_'):]] = value
            del kwargs[name]
    # Fall back to the global error_email setting if none was given.
    if ('error_email' not in threadpool_options
        and 'error_email' in global_conf):
        threadpool_options['error_email'] = global_conf['error_email']
    kwargs['threadpool_options'] = threadpool_options
    serve(wsgi_app, **kwargs)
+
# Extend serve()'s docstring with the threadpool_* options so that
# "paster serve --help"-style tooling documents them too.
# (Fixed: missing backtick on ``threadpool_hung_check_period`` and
# "will go the logger" -> "will go to the logger".)
server_runner.__doc__ = (serve.__doc__ or '') + """

    You can also set these threadpool options:

    ``threadpool_max_requests``:

        The maximum number of requests a worker thread will process
        before dying (and replacing itself with a new worker thread).
        Default 100.

    ``threadpool_hung_thread_limit``:

        The number of seconds a thread can work on a task before it is
        considered hung (stuck).  Default 30 seconds.

    ``threadpool_kill_thread_limit``:

        The number of seconds a thread can work before you should kill it
        (assuming it will never finish).  Default 600 seconds (10 minutes).

    ``threadpool_dying_limit``:

        The length of time after killing a thread that it should actually
        disappear.  If it lives longer than this, it is considered a
        "zombie".  Note that even in easy situations killing a thread can
        be very slow.  Default 300 seconds (5 minutes).

    ``threadpool_spawn_if_under``:

        If there are no idle threads and a request comes in, and there are
        less than this number of *busy* threads, then add workers to the
        pool.  Busy threads are threads that have taken less than
        ``threadpool_hung_thread_limit`` seconds so far.  So if you get
        *lots* of requests but they complete in a reasonable amount of time,
        the requests will simply queue up (adding more threads probably
        wouldn't speed them up).  But if you have lots of hung threads and
        one more request comes in, this will add workers to handle it.
        Default 5.

    ``threadpool_max_zombie_threads_before_die``:

        If there are more zombies than this, just kill the process.  This is
        only good if you have a monitor that will automatically restart
        the server.  This can clean up the mess.  Default 0 (disabled).

    ``threadpool_hung_check_period``:

        Every X requests, check for hung threads that need to be killed,
        or for zombie threads that should cause a restart.  Default 100
        requests.

    ``threadpool_logger``:

        Logging messages will go to the logger named here.

    ``threadpool_error_email`` (or global ``error_email`` setting):

        When threads are killed or the process restarted, this email
        address will be contacted (using an SMTP server on localhost).

"""
+
+
if __name__ == '__main__':
    # Demo / smoke-test entry point: serve an app that dumps the WSGI
    # environ back to the client.
    from paste.wsgilib import dump_environ
    #serve(dump_environ, ssl_pem="test.pem")
    serve(dump_environ, server_version="Wombles/1.0",
          protocol_version="HTTP/1.1", port="8888")
diff --git a/paste/lint.py b/paste/lint.py
new file mode 100644
index 0000000..03b4a60
--- /dev/null
+++ b/paste/lint.py
@@ -0,0 +1,436 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
+# Licensed to PSF under a Contributor Agreement
+"""
+Middleware to check for obedience to the WSGI specification.
+
+Some of the things this checks:
+
+* Signature of the application and start_response (including that
+ keyword arguments are not used).
+
+* Environment checks:
+
+ - Environment is a dictionary (and not a subclass).
+
+ - That all the required keys are in the environment: REQUEST_METHOD,
+ SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
+ wsgi.multithread, wsgi.multiprocess, wsgi.run_once
+
+ - That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
+ environment (these headers should appear as CONTENT_LENGTH and
+ CONTENT_TYPE).
+
+ - Warns if QUERY_STRING is missing, as the cgi module acts
+ unpredictably in that case.
+
+ - That CGI-style variables (that don't contain a .) have
+ (non-unicode) string values
+
+ - That wsgi.version is a tuple
+
+ - That wsgi.url_scheme is 'http' or 'https' (@@: is this too
+ restrictive?)
+
+ - Warns if the REQUEST_METHOD is not known (@@: probably too
+ restrictive).
+
+ - That SCRIPT_NAME and PATH_INFO are empty or start with /
+
+ - That at least one of SCRIPT_NAME or PATH_INFO are set.
+
+ - That CONTENT_LENGTH is a positive integer.
+
+ - That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
+ be '/').
+
+ - That wsgi.input has the methods read, readline, readlines, and
+ __iter__
+
+ - That wsgi.errors has the methods flush, write, writelines
+
+* The status is a string, contains a space, starts with an integer,
+ and that integer is in range (> 100).
+
+* That the headers is a list (not a subclass, not another kind of
+ sequence).
+
+* That the items of the headers are tuples of strings.
+
+* That there is no 'status' header (that is used in CGI, but not in
+ WSGI).
+
+* That the headers don't contain newlines or colons, end in _ or -, or
+ contain characters codes below 037.
+
+* That Content-Type is given if there is content (CGI often has a
+ default content type, but WSGI does not).
+
+* That no Content-Type is given when there is no content (@@: is this
+ too restrictive?)
+
+* That the exc_info argument to start_response is a tuple or None.
+
+* That all calls to the writer are with strings, and no other methods
+ on the writer are accessed.
+
+* That wsgi.input is used properly:
+
+ - .read() is called with zero or one argument
+
+ - That it returns a string
+
+ - That readline, readlines, and __iter__ return strings
+
+ - That .close() is not called
+
+ - No other methods are provided
+
+* That wsgi.errors is used properly:
+
+ - .write() and .writelines() is called with a string
+
+ - That .close() is not called, and no other methods are provided.
+
+* The response iterator:
+
+ - That it is not a string (it should be a list of a single string; a
+ string will work, but perform horribly).
+
+ - That .next() returns a string
+
+ - That the iterator is not iterated over until start_response has
+ been called (that can signal either a server or application
+ error).
+
+ - That .close() is called (doesn't raise exception, only prints to
+ sys.stderr, because we only know it isn't called when the object
+ is garbage collected).
+"""
+
+import re
+import sys
+from types import DictType, StringType, TupleType, ListType
+import warnings
+
# Matches a syntactically valid HTTP header field name: a letter
# followed by letters, digits, hyphens, or underscores.
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
# Matches ASCII control characters (octal 000-037), which are not
# allowed inside header values.
bad_header_value_re = re.compile(r'[\000-\037]')
+
class WSGIWarning(Warning):
    """
    Warning category for all WSGI-specification compliance issues
    this linter reports via ``warnings.warn``.
    """
+
def middleware(application, global_conf=None):
    """
    Wrap *application* in a WSGI-compliance checker.

    Placed between a WSGI server and a WSGI application, the returned
    middleware validates the request environment, the start_response
    call, the status/headers, and the response iterator.  It never
    modifies the request or response; anything that looks off raises
    an AssertionError -- except a missing close() on the application
    iterator, which can only be detected at garbage-collection time
    and is therefore just printed to stderr.
    """

    def lint_app(*args, **kw):
        assert len(args) == 2, "Two arguments required"
        assert not kw, "No keyword arguments allowed"
        environ, start_response = args

        check_environ(environ)

        # Becomes non-empty once start_response is invoked; the
        # IteratorWrapper consults it before yielding body data.
        start_response_started = []

        def start_response_wrapper(*args, **kw):
            assert len(args) == 2 or len(args) == 3, (
                "Invalid number of arguments: %s" % args)
            assert not kw, "No keyword arguments allowed"
            status, headers = args[0], args[1]
            exc_info = args[2] if len(args) == 3 else None

            check_status(status)
            check_headers(headers)
            check_content_type(status, headers)
            check_exc_info(exc_info)

            start_response_started.append(None)
            return WriteWrapper(start_response(*args))

        # Swap in validating proxies so all application I/O on the
        # request streams is checked as well.
        environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])

        iterator = application(environ, start_response_wrapper)
        assert iterator is not None and iterator != False, (
            "The application must return an iterator, if only an empty list")

        check_iterator(iterator)

        return IteratorWrapper(iterator, start_response_started)

    return lint_app
+
class InputWrapper(object):
    """Validating proxy around ``wsgi.input``.

    Asserts that read calls use a legal number of arguments, that all
    returned data are plain strings, and that the application never
    calls ``close()`` on the stream.
    """

    def __init__(self, wsgi_input):
        self.input = wsgi_input

    def read(self, *args):
        assert len(args) <= 1
        data = self.input.read(*args)
        assert type(data) is type("")
        return data

    def readline(self, *args):
        data = self.input.readline(*args)
        assert type(data) is type("")
        return data

    def readlines(self, *args):
        assert len(args) <= 1
        result = self.input.readlines(*args)
        assert type(result) is type([])
        for entry in result:
            assert type(entry) is type("")
        return result

    def __iter__(self):
        line = self.readline()
        while line:
            yield line
            line = self.readline()

    def close(self):
        assert 0, "input.close() must not be called"
+
class ErrorWrapper(object):
    """Validating proxy around ``wsgi.errors``.

    Ensures only plain strings are written and that the application
    never calls ``close()`` on the error stream.
    """

    def __init__(self, wsgi_errors):
        self.errors = wsgi_errors

    def write(self, s):
        assert type(s) is type("")
        self.errors.write(s)

    def flush(self):
        self.errors.flush()

    def writelines(self, seq):
        # Funnel everything through write() so each chunk is checked.
        for chunk in seq:
            self.write(chunk)

    def close(self):
        assert 0, "errors.close() must not be called"
+
class WriteWrapper(object):
    """Validating proxy around the writer returned by start_response;
    asserts that every chunk handed to it is a plain string."""

    def __init__(self, wsgi_writer):
        self.writer = wsgi_writer

    def __call__(self, s):
        assert type(s) is type("")
        self.writer(s)
+
class PartialIteratorWrapper(object):
    """
    Wraps a response iterator so that validation begins only when
    ``__iter__`` is actually called.
    """

    def __init__(self, wsgi_iterator):
        self.iterator = wsgi_iterator

    def __iter__(self):
        # We want to make sure __iter__ is called.
        # Bug fix: IteratorWrapper requires a second argument
        # (check_start_response); the original one-argument call always
        # raised TypeError.  Pass None to disable that check, since the
        # start_response flag is not available here.
        return IteratorWrapper(self.iterator, None)
+
class IteratorWrapper(object):
    """Validating proxy around the application's response iterator
    (Python 2 iterator protocol, i.e. ``next()``).

    Checks that the body is not consumed before start_response has
    been called, that nothing is read after close(), and that close()
    is eventually called (detected at garbage-collection time).
    """

    def __init__(self, wsgi_iterator, check_start_response):
        # Keep the original object so close() can be forwarded to it;
        # consume through a separate iterator object.
        self.original_iterator = wsgi_iterator
        self.iterator = iter(wsgi_iterator)
        self.closed = False
        # A list used as a flag (non-empty once start_response ran),
        # or None to disable the check entirely.
        self.check_start_response = check_start_response

    def __iter__(self):
        return self

    def next(self):
        assert not self.closed, (
            "Iterator read after closed")
        v = self.iterator.next()
        if self.check_start_response is not None:
            # Only verified on the first yielded chunk, then disabled.
            assert self.check_start_response, (
                "The application returns and we started iterating over its body, but start_response has not yet been called")
            self.check_start_response = None
        return v

    def close(self):
        self.closed = True
        # Propagate close() to the wrapped iterator if it has one, as
        # the WSGI spec requires.
        if hasattr(self.original_iterator, 'close'):
            self.original_iterator.close()

    def __del__(self):
        # A missing close() is only detectable here, during garbage
        # collection; an exception raised in __del__ is swallowed, so
        # also report on stderr.
        if not self.closed:
            sys.stderr.write(
                "Iterator garbage collected without being closed")
        assert self.closed, (
            "Iterator garbage collected without being closed")
+
def check_environ(environ):
    """
    Assert that the WSGI ``environ`` dict satisfies the spec.

    Checks the dict type, required CGI and wsgi.* keys, value types of
    CGI-style keys, wsgi.version/url_scheme, the input/error stream
    interfaces, and SCRIPT_NAME/PATH_INFO/CONTENT_LENGTH conventions.
    A missing QUERY_STRING or an unusual REQUEST_METHOD only warns.
    """
    # Must be exactly ``dict`` -- subclasses are rejected on purpose.
    assert type(environ) is DictType, (
        "Environment is not of the right type: %r (environment: %r)"
        % (type(environ), environ))

    for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                'wsgi.version', 'wsgi.input', 'wsgi.errors',
                'wsgi.multithread', 'wsgi.multiprocess',
                'wsgi.run_once']:
        assert key in environ, (
            "Environment missing required key: %r" % key)

    # Content type/length travel as CONTENT_* in CGI, never as HTTP_*.
    for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
        assert key not in environ, (
            "Environment should not have the key: %s "
            "(use %s instead)" % (key, key[5:]))

    if 'QUERY_STRING' not in environ:
        warnings.warn(
            'QUERY_STRING is not in the WSGI environment; the cgi '
            'module will use sys.argv when this variable is missing, '
            'so application errors are more likely',
            WSGIWarning)

    for key in environ.keys():
        if '.' in key:
            # Extension, we don't care about its type
            continue
        assert type(environ[key]) is StringType, (
            "Environmental variable %s is not a string: %r (value: %r)"
            % (key, type(environ[key]), environ[key]))

    assert type(environ['wsgi.version']) is TupleType, (
        "wsgi.version should be a tuple (%r)" % environ['wsgi.version'])
    assert environ['wsgi.url_scheme'] in ('http', 'https'), (
        "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])

    check_input(environ['wsgi.input'])
    check_errors(environ['wsgi.errors'])

    # @@: these need filling out:
    if environ['REQUEST_METHOD'] not in (
        'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
        warnings.warn(
            "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
            WSGIWarning)

    assert (not environ.get('SCRIPT_NAME')
            or environ['SCRIPT_NAME'].startswith('/')), (
        "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
    assert (not environ.get('PATH_INFO')
            or environ['PATH_INFO'].startswith('/')), (
        "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
    if environ.get('CONTENT_LENGTH'):
        assert int(environ['CONTENT_LENGTH']) >= 0, (
            "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])

    if not environ.get('SCRIPT_NAME'):
        assert environ.has_key('PATH_INFO'), (
            "One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
            "should at least be '/' if SCRIPT_NAME is empty)")
    assert environ.get('SCRIPT_NAME') != '/', (
        "SCRIPT_NAME cannot be '/'; it should instead be '', and "
        "PATH_INFO should be '/'")
+
def check_input(wsgi_input):
    """Assert that ``wsgi.input`` exposes the file-like methods the
    WSGI spec requires: read, readline, readlines, and __iter__."""
    required = ('read', 'readline', 'readlines', '__iter__')
    for attr in required:
        assert hasattr(wsgi_input, attr), (
            "wsgi.input (%r) doesn't have the attribute %s"
            % (wsgi_input, attr))
+
def check_errors(wsgi_errors):
    """Assert that ``wsgi.errors`` exposes the methods the WSGI spec
    requires: flush, write, and writelines."""
    required = ('flush', 'write', 'writelines')
    for attr in required:
        assert hasattr(wsgi_errors, attr), (
            "wsgi.errors (%r) doesn't have the attribute %s"
            % (wsgi_errors, attr))
+
def check_status(status):
    """
    Assert that *status* is a string of the form ``'200 OK'``: a
    three-character integer code of at least 100.  A missing space or
    reason phrase only produces a WSGIWarning.
    """
    assert type(status) is StringType, (
        "Status must be a string (not %r)" % status)
    # Implicitly check that we can turn it into an integer:
    status_code = status.split(None, 1)[0]
    assert len(status_code) == 3, (
        "Status codes must be three characters: %r" % status_code)
    status_int = int(status_code)
    assert status_int >= 100, "Status code is invalid: %r" % status_int
    if len(status) < 4 or status[3] != ' ':
        warnings.warn(
            "The status string (%r) should be a three-digit integer "
            "followed by a single space and a status explanation"
            % status, WSGIWarning)
+
def check_headers(headers):
    """
    Assert that *headers* is a plain list of ``(name, value)`` tuples
    with well-formed header names, no 'Status' header, and values
    containing no control characters.
    """
    assert type(headers) is ListType, (
        "Headers (%r) must be of type list: %r"
        % (headers, type(headers)))
    # Collected lower-cased names; currently unused beyond collection.
    header_names = {}
    for item in headers:
        assert type(item) is TupleType, (
            "Individual headers (%r) must be of type tuple: %r"
            % (item, type(item)))
        assert len(item) == 2
        name, value = item
        assert name.lower() != 'status', (
            "The Status header cannot be used; it conflicts with CGI "
            "script, and HTTP status is not given through headers "
            "(value: %r)." % value)
        header_names[name.lower()] = None
        assert '\n' not in name and ':' not in name, (
            "Header names may not contain ':' or '\\n': %r" % name)
        assert header_re.search(name), "Bad header name: %r" % name
        assert not name.endswith('-') and not name.endswith('_'), (
            "Names may not end in '-' or '_': %r" % name)
        assert not bad_header_value_re.search(value), (
            "Bad header value: %r (bad char: %r)"
            % (value, bad_header_value_re.search(value).group(0)))
+
def check_content_type(status, headers):
    """Assert that Content-Type presence matches the status code.

    204 and 304 responses must not carry a Content-Type header (they
    have no body); any other response must include one.
    """
    code = int(status.split(None, 1)[0])
    # @@: need one more person to verify this interpretation of RFC 2616
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
    NO_MESSAGE_BODY = (204, 304)
    NO_MESSAGE_TYPE = (204, 304)
    for name, value in headers:
        if name.lower() != 'content-type':
            continue
        if code not in NO_MESSAGE_TYPE:
            return
        assert 0, (("Content-Type header found in a %s response, "
                    "which must not return content.") % code)
    if code not in NO_MESSAGE_BODY:
        assert 0, "No Content-Type header found in headers (%s)" % headers
+
def check_exc_info(exc_info):
    """Assert that *exc_info* is either None or a tuple (the shape
    produced by ``sys.exc_info()``)."""
    if exc_info is None:
        return
    assert type(exc_info) is type(()), (
        "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
    # More exc_info checks?
+
def check_iterator(iterator):
    """Reject a bare string as the application iterator.

    A string is technically a legal iterable, but it would be served
    character by character, which performs horribly; require it to be
    wrapped in a list instead.
    """
    assert not isinstance(iterator, str), (
        "You should not return a string as your application iterator, "
        "instead return a single-item list containing that string.")
+
def make_middleware(application, global_conf):
    # Paste Deploy filter factory.  No docstring here: the module
    # docstring is assigned to this function's __doc__ right after the
    # definition, so document with comments only.
    # @@: global_conf should be taken out of the middleware function,
    # and isolated here
    return middleware(application)
+
+make_middleware.__doc__ = __doc__
+
+__all__ = ['middleware', 'make_middleware']
diff --git a/paste/modpython.py b/paste/modpython.py
new file mode 100644
index 0000000..a3fef55
--- /dev/null
+++ b/paste/modpython.py
@@ -0,0 +1,252 @@
+"""WSGI Paste wrapper for mod_python. Requires Python 2.2 or greater.
+
+
+Example httpd.conf section for a Paste app with an ini file::
+
+ <Location />
+ SetHandler python-program
+ PythonHandler paste.modpython
+ PythonOption paste.ini /some/location/your/pasteconfig.ini
+ </Location>
+
+Or if you want to load a WSGI application under /your/homedir in the module
+``startup`` and the WSGI app is ``app``::
+
+ <Location />
+ SetHandler python-program
+ PythonHandler paste.modpython
+ PythonPath "['/virtual/project/directory'] + sys.path"
+ PythonOption wsgi.application startup::app
+ </Location>
+
+
+If you'd like to use a virtual installation, make sure to add it in the path
+like so::
+
+ <Location />
+ SetHandler python-program
+ PythonHandler paste.modpython
+ PythonPath "['/virtual/project/directory', '/virtual/lib/python2.4/'] + sys.path"
+ PythonOption paste.ini /virtual/project/directory/pasteconfig.ini
+ </Location>
+
+Some WSGI implementations assume that the SCRIPT_NAME environ variable will
+always be equal to "the root URL of the app"; Apache probably won't act as
+you expect in that case. You can add another PythonOption directive to tell
+modpython_gateway to force that behavior:
+
+ PythonOption SCRIPT_NAME /mcontrol
+
+Some WSGI applications need to be cleaned up when Apache exits. You can
+register a cleanup handler with yet another PythonOption directive:
+
+ PythonOption wsgi.cleanup module::function
+
+The module.function will be called with no arguments on server shutdown,
+once for each child process or thread.
+
+This module is highly based on Robert Brewer's, here:
+http://projects.amor.org/misc/svn/modpython_gateway.py
+"""
+
import traceback

try:
    from mod_python import apache
except ImportError:
    # mod_python is only importable when running inside Apache; allow
    # this module to be imported elsewhere (e.g. for documentation)
    # without it.  Narrowed from a bare ``except:`` so that genuine
    # errors raised while importing mod_python still surface.
    pass
from paste.deploy import loadapp
+
class InputWrapper(object):
    """File-like ``wsgi.input`` adapter over a mod_python request."""

    def __init__(self, req):
        self.req = req

    def close(self):
        pass

    def read(self, size=-1):
        return self.req.read(size)

    def readline(self, size=-1):
        return self.req.readline(size)

    def readlines(self, hint=-1):
        return self.req.readlines(hint)

    def __iter__(self):
        # Lines are fetched lazily: the next line is only read when
        # the generator is resumed.
        while True:
            line = self.readline()
            if not line:
                break
            yield line
+
+
class ErrorWrapper(object):
    """``wsgi.errors`` adapter that routes writes to Apache's error
    log via ``req.log_error``."""

    def __init__(self, req):
        self.req = req

    def flush(self):
        # The Apache log has no flush concept; nothing to do.
        pass

    def write(self, msg):
        self.req.log_error(msg)

    def writelines(self, seq):
        # Joined into one log_error() call, matching the original
        # behavior of a single log entry per writelines().
        self.write(''.join(seq))
+
+
+bad_value = ("You must provide a PythonOption '%s', either 'on' or 'off', "
+ "when running a version of mod_python < 3.1")
+
+
class Handler(object):
    """
    Bridges a single mod_python request to a WSGI application.

    Builds the WSGI environ from the Apache request, provides the
    ``start_response`` callable and the write() fallback, and streams
    the application's response back through mod_python.
    """

    def __init__(self, req):
        # True once any body data has been written; used to decide
        # whether an error can still replace the response.
        self.started = False

        options = req.get_options()

        # Threading and forking
        try:
            # mod_python >= 3.1 can be asked directly about the MPM.
            q = apache.mpm_query
            threaded = q(apache.AP_MPMQ_IS_THREADED)
            forked = q(apache.AP_MPMQ_IS_FORKED)
        except AttributeError:
            # Older mod_python: require explicit PythonOption
            # multithread/multiprocess settings ('on' or 'off').
            threaded = options.get('multithread', '').lower()
            if threaded == 'on':
                threaded = True
            elif threaded == 'off':
                threaded = False
            else:
                raise ValueError(bad_value % "multithread")

            forked = options.get('multiprocess', '').lower()
            if forked == 'on':
                forked = True
            elif forked == 'off':
                forked = False
            else:
                raise ValueError(bad_value % "multiprocess")

        env = self.environ = dict(apache.build_cgi_env(req))

        if 'SCRIPT_NAME' in options:
            # Override SCRIPT_NAME and PATH_INFO if requested.
            env['SCRIPT_NAME'] = options['SCRIPT_NAME']
            env['PATH_INFO'] = req.uri[len(options['SCRIPT_NAME']):]
        else:
            env['SCRIPT_NAME'] = ''
            env['PATH_INFO'] = req.uri

        env['wsgi.input'] = InputWrapper(req)
        env['wsgi.errors'] = ErrorWrapper(req)
        env['wsgi.version'] = (1, 0)
        env['wsgi.run_once'] = False
        if env.get("HTTPS") in ('yes', 'on', '1'):
            env['wsgi.url_scheme'] = 'https'
        else:
            env['wsgi.url_scheme'] = 'http'
        env['wsgi.multithread'] = threaded
        env['wsgi.multiprocess'] = forked

        self.request = req

    def run(self, application):
        """Invoke *application* and stream its response to Apache.

        Any exception is logged to wsgi.errors and, if the body has
        not started yet, turned into a plain-text 500 response.
        """
        try:
            result = application(self.environ, self.start_response)
            for data in result:
                self.write(data)
            if not self.started:
                # Nothing was written: declare an empty body.
                self.request.set_content_length(0)
            if hasattr(result, 'close'):
                result.close()
        except:
            traceback.print_exc(None, self.environ['wsgi.errors'])
            if not self.started:
                self.request.status = 500
                self.request.content_type = 'text/plain'
                data = "A server error occurred. Please contact the administrator."
                self.request.set_content_length(len(data))
                self.request.write(data)

    def start_response(self, status, headers, exc_info=None):
        if exc_info:
            try:
                # Per the WSGI spec: if headers are already sent,
                # re-raise the original exception (Python 2
                # three-argument raise syntax).
                if self.started:
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                # Break the reference cycle through the traceback.
                exc_info = None

        self.request.status = int(status[:3])

        # Content-Length and Content-Type go through mod_python's
        # dedicated request attributes; all other headers are added to
        # headers_out.
        for key, val in headers:
            if key.lower() == 'content-length':
                self.request.set_content_length(int(val))
            elif key.lower() == 'content-type':
                self.request.content_type = val
            else:
                self.request.headers_out.add(key, val)

        return self.write

    def write(self, data):
        if not self.started:
            self.started = True
        self.request.write(data)
+
+
# Per-process cached state: the one-shot startup/cleanup callables and
# a cache mapping Paste ini path -> loaded WSGI application.
startup = None
cleanup = None
wsgiapps = {}
+
def handler(req):
    """mod_python entry point: dispatch the request to the configured
    WSGI application (loaded from a Paste ini file or from a
    ``module::object`` path), and return ``apache.OK``.
    """
    options = req.get_options()
    # Run a startup function if requested.
    global startup
    if 'wsgi.startup' in options and not startup:
        func = options['wsgi.startup']
        if func:
            module_name, object_str = func.split('::', 1)
            module = __import__(module_name, globals(), locals(), [''])
            startup = apache.resolve_object(module, object_str)
            startup(req)

    # Register a cleanup function if requested.
    global cleanup
    if 'wsgi.cleanup' in options and not cleanup:
        func = options['wsgi.cleanup']
        if func:
            module_name, object_str = func.split('::', 1)
            module = __import__(module_name, globals(), locals(), [''])
            cleanup = apache.resolve_object(module, object_str)
            def cleaner(data):
                cleanup()
            try:
                # apache.register_cleanup wasn't available until 3.1.4.
                apache.register_cleanup(cleaner)
            except AttributeError:
                req.server.register_cleanup(req, cleaner)

    # Load (and cache per-process) the WSGI application from a Paste
    # ini file, if one was configured via ``PythonOption paste.ini``.
    global wsgiapps
    appini = options.get('paste.ini')
    app = None
    if appini:
        if appini not in wsgiapps:
            wsgiapps[appini] = loadapp("config:%s" % appini)
        app = wsgiapps[appini]

    # Import the wsgi 'application' callable and pass it to Handler.run
    appwsgi = options.get('wsgi.application')
    if appwsgi and not appini:
        modname, objname = appwsgi.split('::', 1)
        module = __import__(modname, globals(), locals(), [''])
        app = getattr(module, objname)

    Handler(req).run(app)

    # status was set in Handler; always return apache.OK
    return apache.OK
diff --git a/paste/pony.py b/paste/pony.py
new file mode 100644
index 0000000..fce6aa8
--- /dev/null
+++ b/paste/pony.py
@@ -0,0 +1,57 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+We have a pony and/or a unicorn.
+"""
+from paste.request import construct_url
+
+PONY = """
+eJyFkkFuxCAMRfdzCisbJxK2D5D2JpbMrlI3XXQZDt9PCG0ySgcWIMT79rcN0XClUJlZRB9jVmci
+FmV19khjgRFl0RzrKmqzvY8lRUWFlXvCrD7UbAQR/17NUvGhypAF9og16vWtkC8DzUayS6pN3/dR
+ki0OnpzKjUBFpmlC7zVFRNL1rwoq6PWXXQSnIm9WoTzlM2//ke21o5g/l1ckRhiPbkDZXsKIR7l1
+36hF9uMhnRiVjI8UgYjlsIKCrXXpcA9iX5y7zMmtG0fUpW61Ssttipf6cp3WARfkMVoYFryi2a+w
+o/2dhW0OXfcMTnmh53oR9egzPs+qkpY9IKxdUVRP5wHO7UDAuI6moA2N+/z4vtc2k8B+AIBimVU=
+"""
+
+UNICORN = """
+eJyVVD1vhDAM3e9XeAtIxB5P6qlDx0OMXVBzSpZOHdsxP762E0JAnMgZ8Zn37OePAPC60eV1Dl5b
+SS7fB6DmQNGhtegpNlPIQS8HmkYGdSqNqDF9wcMYus4TuBYGsZwIPqXfEoNir5K+R3mbzhlR4JMW
+eGpikPpn9wHl2sDgEH1270guZwzKDRf3nTztMvfI5r3fJqEmNxdCyISBcWjNgjPG8Egg2hgT3mJi
+KBwNvmPB1hbWJ3TwBfMlqdTzxNyDE2H8zOD5HA4KkqJGPVY/TwnxmPA82kdSJNj7zs+R0d1pB+JO
+xn2DKgsdxAfFS2pfTSD0Fb6Uzv7dCQSvE5JmZQEQ90vNjBU1GPuGQpCPS8cGo+dQgjIKqxnJTXbw
+ucFzPFVIJXtzk6BXKGPnYsKzvFmGx7A0j6Zqvlvk5rETXbMWTGWj0RFc8QNPYVfhJfMMniCPazWJ
+lGtPZecIGJWW6oL2hpbWRZEkChe8eg5Wb7xx/MBZBFjxeZPEss+mRQ3Uhc8WQv684seSRO7i3nb4
+7HlKUg8sraz47LmXyh8S0somADvoUpoHjGWl+rUkF0H+EIf/gbyyMg58BBk6L634/fkHUCodMw==
+"""
+
+
class PonyMiddleware(object):
    """WSGI middleware serving ASCII art at /pony; every other path is
    passed straight through to the wrapped application.

    Adding ``horn`` to the query string serves the unicorn instead,
    and the page links back and forth between the two.
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        path_info = environ.get('PATH_INFO', '')
        if path_info == '/pony':
            # URL without query string, used as the toggle link target.
            url = construct_url(environ, with_query_string=False)
            if 'horn' in environ.get('QUERY_STRING', ''):
                data = UNICORN
                link = 'remove horn!'
            else:
                data = PONY
                url += '?horn'
                link = 'add horn!'
            # Python 2 codec chain: base64-decode, then zlib-decompress
            # the embedded art (str.decode('base64'/'zlib') is
            # Python 2 only).
            msg = data.decode('base64').decode('zlib')
            msg = '<pre>%s\n<a href="%s">%s</a></pre>' % (
                msg, url, link)
            start_response('200 OK', [('content-type', 'text/html')])
            return [msg]
        else:
            return self.application(environ, start_response)
+
def make_pony(app, global_conf):
    """
    Paste Deploy filter factory: adds pony power to any application,
    served at /pony.
    """
    wrapped = PonyMiddleware(app)
    return wrapped
+
diff --git a/paste/progress.py b/paste/progress.py
new file mode 100755
index 0000000..57bf0bd
--- /dev/null
+++ b/paste/progress.py
@@ -0,0 +1,222 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# This code was written with funding by http://prometheusresearch.com
+"""
+Upload Progress Monitor
+
+This is a WSGI middleware component which monitors the status of files
+being uploaded. It includes a small query application which will return
+a list of all files being uploaded by particular session/user.
+
+>>> from paste.httpserver import serve
+>>> from paste.urlmap import URLMap
+>>> from paste.auth.basic import AuthBasicHandler
+>>> from paste.debug.debugapp import SlowConsumer, SimpleApplication
+>>> # from paste.progress import *
+>>> realm = 'Test Realm'
+>>> def authfunc(username, password):
+... return username == password
+>>> map = URLMap({})
+>>> ups = UploadProgressMonitor(map, threshold=1024)
+>>> map['/upload'] = SlowConsumer()
+>>> map['/simple'] = SimpleApplication()
+>>> map['/report'] = UploadProgressReporter(ups)
+>>> serve(AuthBasicHandler(ups, realm, authfunc))
+serving on...
+
+.. note::
+
+ This is experimental, and will change in the future.
+"""
+import time
+from paste.wsgilib import catch_errors
+
+DEFAULT_THRESHOLD = 1024 * 1024 # one megabyte
+DEFAULT_TIMEOUT = 60*5 # five minutes
+ENVIRON_RECEIVED = 'paste.bytes_received'
+REQUEST_STARTED = 'paste.request_started'
+REQUEST_FINISHED = 'paste.request_finished'
+
class _ProgressFile(object):
    """
    This is the input-file wrapper used to record the number of
    ``paste.bytes_received`` for the given request.

    Every read is forwarded to the wrapped file, and the byte count of
    the returned data is added to ``environ['paste.bytes_received']``.
    """

    def __init__(self, environ, rfile):
        self._ProgressFile_environ = environ
        self._ProgressFile_rfile = rfile
        self.flush = rfile.flush
        self.write = rfile.write
        self.writelines = rfile.writelines

    def __iter__(self):
        environ = self._ProgressFile_environ
        riter = iter(self._ProgressFile_rfile)
        def iterwrap():
            for chunk in riter:
                environ[ENVIRON_RECEIVED] += len(chunk)
                yield chunk
        # Bug fix: the generator function must be *called*; the
        # original ``iter(iterwrap)`` raised TypeError because a
        # function object is not iterable.
        return iterwrap()

    def read(self, size=-1):
        chunk = self._ProgressFile_rfile.read(size)
        self._ProgressFile_environ[ENVIRON_RECEIVED] += len(chunk)
        return chunk

    def readline(self):
        chunk = self._ProgressFile_rfile.readline()
        self._ProgressFile_environ[ENVIRON_RECEIVED] += len(chunk)
        return chunk

    def readlines(self, hint=None):
        lines = self._ProgressFile_rfile.readlines(hint)
        # Bug fix: count the bytes received, not the number of lines
        # (``len()`` of the returned list counted lines, skewing the
        # progress figure).
        self._ProgressFile_environ[ENVIRON_RECEIVED] += sum(
            [len(line) for line in lines])
        return lines
+
class UploadProgressMonitor(object):
    """
    monitors and reports on the status of uploads in progress

    Parameters:

    ``application``

        The next application in the WSGI stack.

    ``threshold``

        Minimum size in bytes a request body must exceed for the
        upload to be tracked by the monitor.

    ``timeout``

        How long (in seconds) a finished upload remains visible in
        the monitor.

    Methods:

    ``uploads()``

        Returns the list of ``environ`` dict objects for each upload
        currently monitored, or finished but whose time has not yet
        expired.

    Each monitored request ``environ`` carries three extra keys:

    ``paste.bytes_received``

        Total bytes received so far for this request (an integer);
        compare with ``CONTENT_LENGTH`` to build a percentage
        complete.

    ``paste.request_started``

        The ``time.time()`` value at which the request started.

    ``paste.request_finished``

        The ``time.time()`` value at which the request finished,
        was canceled, or otherwise disconnected; None while the
        upload is still in progress.

    TODO: turn monitor into a queue and purge queue of finished
    requests that have passed the timeout period.
    """
    def __init__(self, application, threshold=None, timeout=None):
        self.application = application
        self.threshold = threshold or DEFAULT_THRESHOLD
        self.timeout = timeout or DEFAULT_TIMEOUT
        self.monitor = []

    def __call__(self, environ, start_response):
        length = environ.get('CONTENT_LENGTH', 0)
        if not (length and int(length) > self.threshold):
            # Small (or absent) request bodies are not tracked.
            return self.application(environ, start_response)
        # Track this upload: record it, stamp the bookkeeping keys,
        # and swap in the byte-counting input wrapper.
        self.monitor.append(environ)
        environ[ENVIRON_RECEIVED] = 0
        environ[REQUEST_STARTED] = time.time()
        environ[REQUEST_FINISHED] = None
        environ['wsgi.input'] = \
            _ProgressFile(environ, environ['wsgi.input'])
        def finalizer(exc_info=None):
            environ[REQUEST_FINISHED] = time.time()
        return catch_errors(self.application, environ,
                            start_response, finalizer, finalizer)

    def uploads(self):
        return self.monitor
+
class UploadProgressReporter(object):
    """
    reports on the progress of uploads for a given user

    This reporter returns a JSON file (for use in AJAX) listing the
    uploads in progress for the given user. By default, this reporter
    uses the ``REMOTE_USER`` environment to compare between the current
    request and uploads in-progress. If they match, then a response
    record is formed.

    ``match()``

        This member function can be overridden to provide alternative
        matching criteria. It takes two environments, the first
        is the current request, the second is a current upload.

    ``report()``

        This member function takes an environment and builds a
        ``dict`` that will be used to create a JSON mapping for
        the given upload. By default, this just includes the
        percent complete and the request url.

    """
    def __init__(self, monitor):
        self.monitor = monitor

    def match(self, search_environ, upload_environ):
        # Note the asymmetric defaults (None vs 0): two environs that
        # are *both* missing REMOTE_USER do not match each other.
        if search_environ.get('REMOTE_USER', None) == \
           upload_environ.get('REMOTE_USER', 0):
            return True
        return False

    def report(self, environ):
        """Build one status record (a plain dict) for an upload."""
        retval = { 'started': time.strftime("%Y-%m-%d %H:%M:%S",
                                time.gmtime(environ[REQUEST_STARTED])),
                   'finished': '',
                   'content_length': environ.get('CONTENT_LENGTH'),
                   'bytes_received': environ[ENVIRON_RECEIVED],
                   'path_info': environ.get('PATH_INFO',''),
                   'query_string': environ.get('QUERY_STRING','')}
        finished = environ[REQUEST_FINISHED]
        if finished:
            retval['finished'] = time.strftime("%Y:%m:%d %H:%M:%S",
                                               time.gmtime(finished))
        return retval

    def __call__(self, environ, start_response):
        body = []
        # Renamed from ``map`` to avoid shadowing the builtin.
        for record in [self.report(env) for env in self.monitor.uploads()
                       if self.match(environ, env)]:
            parts = []
            for k, v in record.items():
                v = str(v).replace("\\", "\\\\").replace('"', '\\"')
                parts.append('%s: "%s"' % (k, v))
            body.append("{ %s }" % ", ".join(parts))
        body = "[ %s ]" % ", ".join(body)
        # Bug fix: WSGI header values must be strings, not integers
        # (the original passed ``len(body)`` as an int).
        start_response("200 OK", [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
        return [body]
+
+__all__ = ['UploadProgressMonitor', 'UploadProgressReporter']
+
+if "__main__" == __name__:
+ import doctest
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
diff --git a/paste/proxy.py b/paste/proxy.py
new file mode 100644
index 0000000..155128f
--- /dev/null
+++ b/paste/proxy.py
@@ -0,0 +1,283 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+An application that proxies WSGI requests to a remote server.
+
+TODO:
+
+* Send ``Via`` header? It's not clear to me this is a Via in the
+ style of a typical proxy.
+
+* Other headers or metadata? I put in X-Forwarded-For, but that's it.
+
+* Signed data of non-HTTP keys? This would be for things like
+ REMOTE_USER.
+
+* Something to indicate what the original URL was? The original host,
+ scheme, and base path.
+
+* Rewriting ``Location`` headers? mod_proxy does this.
+
+* Rewriting body? (Probably not on this one -- that can be done with
+ a different middleware that wraps this middleware)
+
+* Example::
+
+ use = egg:Paste#proxy
+ address = http://server3:8680/exist/rest/db/orgs/sch/config/
+ allowed_request_methods = GET
+
+"""
+
+import httplib
+import urlparse
+import urllib
+
+from paste import httpexceptions
+from paste.util.converters import aslist
+
+# Remove these headers from response (specify lower case header
+# names):
+filtered_headers = (
+ 'transfer-encoding',
+ 'connection',
+ 'keep-alive',
+ 'proxy-authenticate',
+ 'proxy-authorization',
+ 'te',
+ 'trailers',
+ 'upgrade',
+)
+
+class Proxy(object):
+
+ def __init__(self, address, allowed_request_methods=(),
+ suppress_http_headers=()):
+ self.address = address
+ self.parsed = urlparse.urlsplit(address)
+ self.scheme = self.parsed[0].lower()
+ self.host = self.parsed[1]
+ self.path = self.parsed[2]
+ self.allowed_request_methods = [
+ x.lower() for x in allowed_request_methods if x]
+
+ self.suppress_http_headers = [
+ x.lower() for x in suppress_http_headers if x]
+
+ def __call__(self, environ, start_response):
+ if (self.allowed_request_methods and
+ environ['REQUEST_METHOD'].lower() not in self.allowed_request_methods):
+ return httpexceptions.HTTPBadRequest("Disallowed")(environ, start_response)
+
+ if self.scheme == 'http':
+ ConnClass = httplib.HTTPConnection
+ elif self.scheme == 'https':
+ ConnClass = httplib.HTTPSConnection
+ else:
+ raise ValueError(
+ "Unknown scheme for %r: %r" % (self.address, self.scheme))
+ conn = ConnClass(self.host)
+ headers = {}
+ for key, value in environ.items():
+ if key.startswith('HTTP_'):
+ key = key[5:].lower().replace('_', '-')
+ if key == 'host' or key in self.suppress_http_headers:
+ continue
+ headers[key] = value
+ headers['host'] = self.host
+ if 'REMOTE_ADDR' in environ:
+ headers['x-forwarded-for'] = environ['REMOTE_ADDR']
+ if environ.get('CONTENT_TYPE'):
+ headers['content-type'] = environ['CONTENT_TYPE']
+ if environ.get('CONTENT_LENGTH'):
+ if environ['CONTENT_LENGTH'] == '-1':
+ # This is a special case, where the content length is basically undetermined
+ body = environ['wsgi.input'].read(-1)
+ headers['content-length'] = str(len(body))
+ else:
+ headers['content-length'] = environ['CONTENT_LENGTH']
+ length = int(environ['CONTENT_LENGTH'])
+ body = environ['wsgi.input'].read(length)
+ else:
+ body = ''
+
+ path_info = urllib.quote(environ['PATH_INFO'])
+ if self.path:
+ request_path = path_info
+ if request_path and request_path[0] == '/':
+ request_path = request_path[1:]
+
+ path = urlparse.urljoin(self.path, request_path)
+ else:
+ path = path_info
+ if environ.get('QUERY_STRING'):
+ path += '?' + environ['QUERY_STRING']
+
+ conn.request(environ['REQUEST_METHOD'],
+ path,
+ body, headers)
+ res = conn.getresponse()
+ headers_out = parse_headers(res.msg)
+
+ status = '%s %s' % (res.status, res.reason)
+ start_response(status, headers_out)
+ # @@: Default?
+ length = res.getheader('content-length')
+ if length is not None:
+ body = res.read(int(length))
+ else:
+ body = res.read()
+ conn.close()
+ return [body]
+
+def make_proxy(global_conf, address, allowed_request_methods="",
+ suppress_http_headers=""):
+ """
+ Make a WSGI application that proxies to another address:
+
+ ``address``
+ the full URL ending with a trailing ``/``
+
+ ``allowed_request_methods``:
+      a space separated list of request methods (e.g., ``GET POST``)
+
+ ``suppress_http_headers``
+      a space separated list of http headers (lower case, without
+ the leading ``http_``) that should not be passed on to target
+ host
+ """
+ allowed_request_methods = aslist(allowed_request_methods)
+ suppress_http_headers = aslist(suppress_http_headers)
+ return Proxy(
+ address,
+ allowed_request_methods=allowed_request_methods,
+ suppress_http_headers=suppress_http_headers)
+
+
+class TransparentProxy(object):
+
+ """
+ A proxy that sends the request just as it was given, including
+ respecting HTTP_HOST, wsgi.url_scheme, etc.
+
+ This is a way of translating WSGI requests directly to real HTTP
+ requests. All information goes in the environment; modify it to
+ modify the way the request is made.
+
+ If you specify ``force_host`` (and optionally ``force_scheme``)
+ then HTTP_HOST won't be used to determine where to connect to;
+ instead a specific host will be connected to, but the ``Host``
+ header in the request will remain intact.
+ """
+
+ def __init__(self, force_host=None,
+ force_scheme='http'):
+ self.force_host = force_host
+ self.force_scheme = force_scheme
+
+ def __repr__(self):
+ return '<%s %s force_host=%r force_scheme=%r>' % (
+ self.__class__.__name__,
+ hex(id(self)),
+ self.force_host, self.force_scheme)
+
+ def __call__(self, environ, start_response):
+ scheme = environ['wsgi.url_scheme']
+ if self.force_host is None:
+ conn_scheme = scheme
+ else:
+ conn_scheme = self.force_scheme
+ if conn_scheme == 'http':
+ ConnClass = httplib.HTTPConnection
+ elif conn_scheme == 'https':
+ ConnClass = httplib.HTTPSConnection
+ else:
+ raise ValueError(
+ "Unknown scheme %r" % scheme)
+ if 'HTTP_HOST' not in environ:
+ raise ValueError(
+ "WSGI environ must contain an HTTP_HOST key")
+ host = environ['HTTP_HOST']
+ if self.force_host is None:
+ conn_host = host
+ else:
+ conn_host = self.force_host
+ conn = ConnClass(conn_host)
+ headers = {}
+ for key, value in environ.items():
+ if key.startswith('HTTP_'):
+ key = key[5:].lower().replace('_', '-')
+ headers[key] = value
+ headers['host'] = host
+ if 'REMOTE_ADDR' in environ and 'HTTP_X_FORWARDED_FOR' not in environ:
+ headers['x-forwarded-for'] = environ['REMOTE_ADDR']
+ if environ.get('CONTENT_TYPE'):
+ headers['content-type'] = environ['CONTENT_TYPE']
+ if environ.get('CONTENT_LENGTH'):
+ length = int(environ['CONTENT_LENGTH'])
+ body = environ['wsgi.input'].read(length)
+ if length == -1:
+ environ['CONTENT_LENGTH'] = str(len(body))
+ elif 'CONTENT_LENGTH' not in environ:
+ body = ''
+ length = 0
+ else:
+ body = ''
+ length = 0
+
+ path = (environ.get('SCRIPT_NAME', '')
+ + environ.get('PATH_INFO', ''))
+ path = urllib.quote(path)
+ if 'QUERY_STRING' in environ:
+ path += '?' + environ['QUERY_STRING']
+ conn.request(environ['REQUEST_METHOD'],
+ path, body, headers)
+ res = conn.getresponse()
+ headers_out = parse_headers(res.msg)
+
+ status = '%s %s' % (res.status, res.reason)
+ start_response(status, headers_out)
+ # @@: Default?
+ length = res.getheader('content-length')
+ if length is not None:
+ body = res.read(int(length))
+ else:
+ body = res.read()
+ conn.close()
+ return [body]
+
+def parse_headers(message):
+ """
+ Turn a Message object into a list of WSGI-style headers.
+ """
+ headers_out = []
+ for full_header in message.headers:
+ if not full_header:
+ # Shouldn't happen, but we'll just ignore
+ continue
+ if full_header[0].isspace():
+ # Continuation line, add to the last header
+ if not headers_out:
+ raise ValueError(
+ "First header starts with a space (%r)" % full_header)
+ last_header, last_value = headers_out.pop()
+ value = last_value + ' ' + full_header.strip()
+ headers_out.append((last_header, value))
+ continue
+ try:
+ header, value = full_header.split(':', 1)
+ except:
+ raise ValueError("Invalid header: %r" % full_header)
+ value = value.strip()
+ if header.lower() not in filtered_headers:
+ headers_out.append((header, value))
+ return headers_out
+
+def make_transparent_proxy(
+ global_conf, force_host=None, force_scheme='http'):
+ """
+ Create a proxy that connects to a specific host, but does
+ absolutely no other filtering, including the Host header.
+ """
+ return TransparentProxy(force_host=force_host,
+ force_scheme=force_scheme)
diff --git a/paste/recursive.py b/paste/recursive.py
new file mode 100644
index 0000000..8ea7038
--- /dev/null
+++ b/paste/recursive.py
@@ -0,0 +1,405 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Middleware to make internal requests and forward requests internally.
+
+When applied, several keys are added to the environment that will allow
+you to trigger recursive redirects and forwards.
+
+ paste.recursive.include:
+ When you call
+ ``environ['paste.recursive.include'](new_path_info)`` a response
+ will be returned. The response has a ``body`` attribute, a
+ ``status`` attribute, and a ``headers`` attribute.
+
+ paste.recursive.script_name:
+ The ``SCRIPT_NAME`` at the point that recursive lives. Only
+ paths underneath this path can be redirected to.
+
+ paste.recursive.old_path_info:
+ A list of previous ``PATH_INFO`` values from previous redirects.
+
+Raise ``ForwardRequestException(new_path_info)`` to do a forward
+(aborting the current request).
+"""
+
+from cStringIO import StringIO
+import warnings
+
+__all__ = ['RecursiveMiddleware']
+__pudge_all__ = ['RecursiveMiddleware', 'ForwardRequestException']
+
+class RecursionLoop(AssertionError):
+ # Subclasses AssertionError for legacy reasons
+ """Raised when a recursion enters into a loop"""
+
+class CheckForRecursionMiddleware(object):
+ def __init__(self, app, env):
+ self.app = app
+ self.env = env
+
+ def __call__(self, environ, start_response):
+ path_info = environ.get('PATH_INFO','')
+ if path_info in self.env.get(
+ 'paste.recursive.old_path_info', []):
+ raise RecursionLoop(
+ "Forwarding loop detected; %r visited twice (internal "
+ "redirect path: %s)"
+ % (path_info, self.env['paste.recursive.old_path_info']))
+ old_path_info = self.env.setdefault('paste.recursive.old_path_info', [])
+ old_path_info.append(self.env.get('PATH_INFO', ''))
+ return self.app(environ, start_response)
+
+class RecursiveMiddleware(object):
+
+ """
+ A WSGI middleware that allows for recursive and forwarded calls.
+ All these calls go to the same 'application', but presumably that
+ application acts differently with different URLs. The forwarded
+ URLs must be relative to this container.
+
+ Interface is entirely through the ``paste.recursive.forward`` and
+ ``paste.recursive.include`` environmental keys.
+ """
+
+ def __init__(self, application, global_conf=None):
+ self.application = application
+
+ def __call__(self, environ, start_response):
+ environ['paste.recursive.forward'] = Forwarder(
+ self.application,
+ environ,
+ start_response)
+ environ['paste.recursive.include'] = Includer(
+ self.application,
+ environ,
+ start_response)
+ environ['paste.recursive.include_app_iter'] = IncluderAppIter(
+ self.application,
+ environ,
+ start_response)
+ my_script_name = environ.get('SCRIPT_NAME', '')
+ environ['paste.recursive.script_name'] = my_script_name
+ try:
+ return self.application(environ, start_response)
+ except ForwardRequestException, e:
+ middleware = CheckForRecursionMiddleware(
+ e.factory(self), environ)
+ return middleware(environ, start_response)
+
+class ForwardRequestException(Exception):
+ """
+ Used to signal that a request should be forwarded to a different location.
+
+ ``url``
+ The URL to forward to starting with a ``/`` and relative to
+ ``RecursiveMiddleware``. URL fragments can also contain query strings
+ so ``/error?code=404`` would be a valid URL fragment.
+
+ ``environ``
+        An alternative WSGI environment dictionary to use for the forwarded
+        request. If specified it is used *instead* of the ``url``
+
+ ``factory``
+        If specified ``factory`` is used instead of ``url`` or ``environ``.
+ ``factory`` is a callable that takes a WSGI application object
+ as the first argument and returns an initialised WSGI middleware
+ which can alter the forwarded response.
+
+ Basic usage (must have ``RecursiveMiddleware`` present) :
+
+ .. code-block:: python
+
+ from paste.recursive import ForwardRequestException
+ def app(environ, start_response):
+ if environ['PATH_INFO'] == '/hello':
+ start_response("200 OK", [('Content-type', 'text/plain')])
+ return ['Hello World!']
+ elif environ['PATH_INFO'] == '/error':
+ start_response("404 Not Found", [('Content-type', 'text/plain')])
+ return ['Page not found']
+ else:
+ raise ForwardRequestException('/error')
+
+ from paste.recursive import RecursiveMiddleware
+ app = RecursiveMiddleware(app)
+
+ If you ran this application and visited ``/hello`` you would get a
+ ``Hello World!`` message. If you ran the application and visited
+    ``/not_found`` a ``ForwardRequestException`` would be raised and then caught
+ by the ``RecursiveMiddleware``. The ``RecursiveMiddleware`` would then
+ return the headers and response from the ``/error`` URL but would display
+ a ``404 Not found`` status message.
+
+ You could also specify an ``environ`` dictionary instead of a url. Using
+ the same example as before:
+
+ .. code-block:: python
+
+ def app(environ, start_response):
+ ... same as previous example ...
+ else:
+ new_environ = environ.copy()
+ new_environ['PATH_INFO'] = '/error'
+ raise ForwardRequestException(environ=new_environ)
+
+ Finally, if you want complete control over every aspect of the forward you
+ can specify a middleware factory. For example to keep the old status code
+    but use the headers and response body from the forwarded response you might
+ do this:
+
+ .. code-block:: python
+
+ from paste.recursive import ForwardRequestException
+ from paste.recursive import RecursiveMiddleware
+ from paste.errordocument import StatusKeeper
+
+ def app(environ, start_response):
+ if environ['PATH_INFO'] == '/hello':
+ start_response("200 OK", [('Content-type', 'text/plain')])
+ return ['Hello World!']
+ elif environ['PATH_INFO'] == '/error':
+ start_response("404 Not Found", [('Content-type', 'text/plain')])
+ return ['Page not found']
+ else:
+ def factory(app):
+ return StatusKeeper(app, status='404 Not Found', url='/error')
+ raise ForwardRequestException(factory=factory)
+
+ app = RecursiveMiddleware(app)
+ """
+
+ def __init__(
+ self,
+ url=None,
+ environ={},
+ factory=None,
+ path_info=None):
+ # Check no incompatible options have been chosen
+ if factory and url:
+ raise TypeError(
+ 'You cannot specify factory and a url in '
+ 'ForwardRequestException')
+ elif factory and environ:
+ raise TypeError(
+ 'You cannot specify factory and environ in '
+ 'ForwardRequestException')
+ if url and environ:
+ raise TypeError(
+ 'You cannot specify environ and url in '
+ 'ForwardRequestException')
+
+ # set the path_info or warn about its use.
+ if path_info:
+ if not url:
+ warnings.warn(
+ "ForwardRequestException(path_info=...) has been deprecated; please "
+ "use ForwardRequestException(url=...)",
+ DeprecationWarning, 2)
+ else:
+ raise TypeError('You cannot use url and path_info in ForwardRequestException')
+ self.path_info = path_info
+
+ # If the url can be treated as a path_info do that
+ if url and not '?' in str(url):
+ self.path_info = url
+
+ # Base middleware
+ class ForwardRequestExceptionMiddleware(object):
+ def __init__(self, app):
+ self.app = app
+
+ # Otherwise construct the appropriate middleware factory
+ if hasattr(self, 'path_info'):
+ p = self.path_info
+ def factory_(app):
+ class PathInfoForward(ForwardRequestExceptionMiddleware):
+ def __call__(self, environ, start_response):
+ environ['PATH_INFO'] = p
+ return self.app(environ, start_response)
+ return PathInfoForward(app)
+ self.factory = factory_
+ elif url:
+ def factory_(app):
+ class URLForward(ForwardRequestExceptionMiddleware):
+ def __call__(self, environ, start_response):
+ environ['PATH_INFO'] = url.split('?')[0]
+ environ['QUERY_STRING'] = url.split('?')[1]
+ return self.app(environ, start_response)
+ return URLForward(app)
+ self.factory = factory_
+ elif environ:
+ def factory_(app):
+ class EnvironForward(ForwardRequestExceptionMiddleware):
+ def __call__(self, environ_, start_response):
+ return self.app(environ, start_response)
+ return EnvironForward(app)
+ self.factory = factory_
+ else:
+ self.factory = factory
+
+class Recursive(object):
+
+ def __init__(self, application, environ, start_response):
+ self.application = application
+ self.original_environ = environ.copy()
+ self.previous_environ = environ
+ self.start_response = start_response
+
+ def __call__(self, path, extra_environ=None):
+ """
+ `extra_environ` is an optional dictionary that is also added
+ to the forwarded request. E.g., ``{'HTTP_HOST': 'new.host'}``
+ could be used to forward to a different virtual host.
+ """
+ environ = self.original_environ.copy()
+ if extra_environ:
+ environ.update(extra_environ)
+ environ['paste.recursive.previous_environ'] = self.previous_environ
+ base_path = self.original_environ.get('SCRIPT_NAME')
+ if path.startswith('/'):
+ assert path.startswith(base_path), (
+ "You can only forward requests to resources under the "
+ "path %r (not %r)" % (base_path, path))
+ path = path[len(base_path)+1:]
+ assert not path.startswith('/')
+ path_info = '/' + path
+ environ['PATH_INFO'] = path_info
+ environ['REQUEST_METHOD'] = 'GET'
+ environ['CONTENT_LENGTH'] = '0'
+ environ['CONTENT_TYPE'] = ''
+ environ['wsgi.input'] = StringIO('')
+ return self.activate(environ)
+
+ def activate(self, environ):
+ raise NotImplementedError
+
+ def __repr__(self):
+ return '<%s.%s from %s>' % (
+ self.__class__.__module__,
+ self.__class__.__name__,
+ self.original_environ.get('SCRIPT_NAME') or '/')
+
+class Forwarder(Recursive):
+
+ """
+ The forwarder will try to restart the request, except with
+ the new `path` (replacing ``PATH_INFO`` in the request).
+
+    It must not be called after any headers have been returned.
+ It returns an iterator that must be returned back up the call
+ stack, so it must be used like:
+
+ .. code-block:: python
+
+ return environ['paste.recursive.forward'](path)
+
+ Meaningful transformations cannot be done, since headers are
+ sent directly to the server and cannot be inspected or
+ rewritten.
+ """
+
+ def activate(self, environ):
+ warnings.warn(
+ "recursive.Forwarder has been deprecated; please use "
+ "ForwardRequestException",
+ DeprecationWarning, 2)
+ return self.application(environ, self.start_response)
+
+
+class Includer(Recursive):
+
+ """
+ Starts another request with the given path and adding or
+ overwriting any values in the `extra_environ` dictionary.
+    Returns an IncludedResponse object.
+ """
+
+ def activate(self, environ):
+ response = IncludedResponse()
+ def start_response(status, headers, exc_info=None):
+ if exc_info:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ response.status = status
+ response.headers = headers
+ return response.write
+ app_iter = self.application(environ, start_response)
+ try:
+ for s in app_iter:
+ response.write(s)
+ finally:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+ response.close()
+ return response
+
+class IncludedResponse(object):
+
+ def __init__(self):
+ self.headers = None
+ self.status = None
+ self.output = StringIO()
+ self.str = None
+
+ def close(self):
+ self.str = self.output.getvalue()
+ self.output.close()
+ self.output = None
+
+ def write(self, s):
+ assert self.output is not None, (
+ "This response has already been closed and no further data "
+ "can be written.")
+ self.output.write(s)
+
+ def __str__(self):
+ return self.body
+
+ def body__get(self):
+ if self.str is None:
+ return self.output.getvalue()
+ else:
+ return self.str
+ body = property(body__get)
+
+
+class IncluderAppIter(Recursive):
+ """
+ Like Includer, but just stores the app_iter response
+ (be sure to call close on the response!)
+ """
+
+ def activate(self, environ):
+ response = IncludedAppIterResponse()
+ def start_response(status, headers, exc_info=None):
+ if exc_info:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ response.status = status
+ response.headers = headers
+ return response.write
+ app_iter = self.application(environ, start_response)
+ response.app_iter = app_iter
+ return response
+
+class IncludedAppIterResponse(object):
+
+ def __init__(self):
+ self.status = None
+ self.headers = None
+ self.accumulated = []
+ self.app_iter = None
+ self._closed = False
+
+ def close(self):
+ assert not self._closed, (
+ "Tried to close twice")
+ if hasattr(self.app_iter, 'close'):
+ self.app_iter.close()
+
+ def write(self, s):
+ self.accumulated.append
+
+def make_recursive_middleware(app, global_conf):
+ return RecursiveMiddleware(app)
+
+make_recursive_middleware.__doc__ = __doc__
diff --git a/paste/registry.py b/paste/registry.py
new file mode 100644
index 0000000..ef6516c
--- /dev/null
+++ b/paste/registry.py
@@ -0,0 +1,581 @@
+# (c) 2005 Ben Bangert
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""Registry for handling request-local module globals sanely
+
+Dealing with module globals in a thread-safe way is good if your
+application is the sole responder in a thread, however that approach fails
+to properly account for various scenarios that occur with WSGI applications
+and middleware.
+
+What is actually needed in the case where a module global is desired that
+is always set properly depending on the current request, is a stacked
+thread-local object. Such an object is popped or pushed during the request
+cycle so that it properly represents the object that should be active for
+the current request.
+
+To make it easy to deal with such variables, this module provides a special
+StackedObjectProxy class which you can instantiate and attach to your
+module where you'd like others to access it. The object you'd like this to
+actually "be" during the request is then registered with the
+RegistryManager middleware, which ensures that for the scope of the current
+WSGI application everything will work properly.
+
+Example:
+
+.. code-block:: python
+
+ #yourpackage/__init__.py
+
+ from paste.registry import RegistryManager, StackedObjectProxy
+ myglobal = StackedObjectProxy()
+
+ #wsgi app stack
+ app = RegistryManager(yourapp)
+
+ #inside your wsgi app
+ class yourapp(object):
+ def __call__(self, environ, start_response):
+ obj = someobject # The request-local object you want to access
+ # via yourpackage.myglobal
+ if environ.has_key('paste.registry'):
+ environ['paste.registry'].register(myglobal, obj)
+
+You will then be able to import yourpackage anywhere in your WSGI app or in
+the calling stack below it and be assured that it is using the object you
+registered with Registry.
+
+RegistryManager can be in the WSGI stack multiple times, each time it
+appears it registers a new request context.
+
+
+Performance
+===========
+
+The overhead of the proxy object is very minimal, however if you are using
+proxy objects extensively (Thousands of accesses per request or more), there
+are some ways to avoid them. A proxy object runs approximately 3-20x slower
+than direct access to the object, this is rarely your performance bottleneck
+when developing web applications.
+
+Should you be developing a system which may be accessing the proxy object
+thousands of times per request, the performance of the proxy will start to
+become more noticeable. In that circumstance, the problem can be avoided by
+getting at the actual object via the proxy with the ``_current_obj`` function:
+
+.. code-block:: python
+
+ #sessions.py
+ Session = StackedObjectProxy()
+ # ... initialization code, etc.
+
+ # somemodule.py
+ import sessions
+
+ def somefunc():
+ session = sessions.Session._current_obj()
+ # ... tons of session access
+
+This way the proxy is used only once to retrieve the object for the current
+context and the overhead is minimized while still making it easy to access
+the underlying object. The ``_current_obj`` function is preceded by an
+underscore to more likely avoid clashing with the contained object's
+attributes.
+
+**NOTE:** This is *highly* unlikely to be an issue in the vast majority of
+cases, and requires incredibly large amounts of proxy object access before
+one should consider the proxy object to be causing slow-downs. This section
+is provided solely in the extremely rare case that it is an issue so that a
+quick way to work around it is documented.
+
+"""
+import sys
+import paste.util.threadinglocal as threadinglocal
+
+__all__ = ['StackedObjectProxy', 'RegistryManager', 'StackedObjectRestorer',
+ 'restorer']
+
+class NoDefault(object): pass
+
+class StackedObjectProxy(object):
+ """Track an object instance internally using a stack
+
+ The StackedObjectProxy proxies access to an object internally using a
+ stacked thread-local. This makes it safe for complex WSGI environments
+ where access to the object may be desired in multiple places without
+ having to pass the actual object around.
+
+ New objects are added to the top of the stack with _push_object while
+ objects can be removed with _pop_object.
+
+ """
+ def __init__(self, default=NoDefault, name="Default"):
+ """Create a new StackedObjectProxy
+
+        If a default is given, it's used in every thread if no other object
+ has been pushed on.
+
+ """
+ self.__dict__['____name__'] = name
+ self.__dict__['____local__'] = threadinglocal.local()
+ if default is not NoDefault:
+ self.__dict__['____default_object__'] = default
+
+ def __dir__(self):
+ """Return a list of the StackedObjectProxy's and proxied
+ object's (if one exists) names.
+ """
+ dir_list = dir(self.__class__) + self.__dict__.keys()
+ try:
+ dir_list.extend(dir(self._current_obj()))
+ except TypeError:
+ pass
+ dir_list.sort()
+ return dir_list
+
+ def __getattr__(self, attr):
+ return getattr(self._current_obj(), attr)
+
+ def __setattr__(self, attr, value):
+ setattr(self._current_obj(), attr, value)
+
+ def __delattr__(self, name):
+ delattr(self._current_obj(), name)
+
+ def __getitem__(self, key):
+ return self._current_obj()[key]
+
+ def __setitem__(self, key, value):
+ self._current_obj()[key] = value
+
+ def __delitem__(self, key):
+ del self._current_obj()[key]
+
+ def __call__(self, *args, **kw):
+ return self._current_obj()(*args, **kw)
+
+ def __repr__(self):
+ try:
+ return repr(self._current_obj())
+ except (TypeError, AttributeError):
+ return '<%s.%s object at 0x%x>' % (self.__class__.__module__,
+ self.__class__.__name__,
+ id(self))
+
+ def __iter__(self):
+ return iter(self._current_obj())
+
+ def __len__(self):
+ return len(self._current_obj())
+
+ def __contains__(self, key):
+ return key in self._current_obj()
+
+ def __nonzero__(self):
+ return bool(self._current_obj())
+
+ def _current_obj(self):
+ """Returns the current active object being proxied to
+
+ In the event that no object was pushed, the default object if
+ provided will be used. Otherwise, a TypeError will be raised.
+
+ """
+ try:
+ objects = self.____local__.objects
+ except AttributeError:
+ objects = None
+ if objects:
+ return objects[-1]
+ else:
+ obj = self.__dict__.get('____default_object__', NoDefault)
+ if obj is not NoDefault:
+ return obj
+ else:
+ raise TypeError(
+ 'No object (name: %s) has been registered for this '
+ 'thread' % self.____name__)
+
+ def _push_object(self, obj):
+ """Make ``obj`` the active object for this thread-local.
+
+ This should be used like:
+
+ .. code-block:: python
+
+ obj = yourobject()
+ module.glob = StackedObjectProxy()
+ module.glob._push_object(obj)
+ try:
+ ... do stuff ...
+ finally:
+ module.glob._pop_object(conf)
+
+ """
+ try:
+ self.____local__.objects.append(obj)
+ except AttributeError:
+ self.____local__.objects = []
+ self.____local__.objects.append(obj)
+
+ def _pop_object(self, obj=None):
+ """Remove a thread-local object.
+
+ If ``obj`` is given, it is checked against the popped object and an
+ error is emitted if they don't match.
+
+ """
+ try:
+ popped = self.____local__.objects.pop()
+ if obj and popped is not obj:
+ raise AssertionError(
+ 'The object popped (%s) is not the same as the object '
+ 'expected (%s)' % (popped, obj))
+ except AttributeError:
+ raise AssertionError('No object has been registered for this thread')
+
+ def _object_stack(self):
+ """Returns all of the objects stacked in this container
+
+ (Might return [] if there are none)
+ """
+ try:
+ try:
+ objs = self.____local__.objects
+ except AttributeError:
+ return []
+ return objs[:]
+ except AssertionError:
+ return []
+
+ # The following methods will be swapped for their original versions by
+ # StackedObjectRestorer when restoration is enabled. The original
+ # functions (e.g. _current_obj) will be available at _current_obj_orig
+
+ def _current_obj_restoration(self):
+ request_id = restorer.in_restoration()
+ if request_id:
+ return restorer.get_saved_proxied_obj(self, request_id)
+ return self._current_obj_orig()
+ _current_obj_restoration.__doc__ = \
+ ('%s\n(StackedObjectRestorer restoration enabled)' % \
+ _current_obj.__doc__)
+
+ def _push_object_restoration(self, obj):
+ if not restorer.in_restoration():
+ self._push_object_orig(obj)
+ _push_object_restoration.__doc__ = \
+ ('%s\n(StackedObjectRestorer restoration enabled)' % \
+ _push_object.__doc__)
+
+ def _pop_object_restoration(self, obj=None):
+ if not restorer.in_restoration():
+ self._pop_object_orig(obj)
+ _pop_object_restoration.__doc__ = \
+ ('%s\n(StackedObjectRestorer restoration enabled)' % \
+ _pop_object.__doc__)
+
+class Registry(object):
+ """Track objects and stacked object proxies for removal
+
+ The Registry object is instantiated a single time for the request no
+ matter how many times the RegistryManager is used in a WSGI stack. Each
+ RegistryManager must call ``prepare`` before continuing the call to
+ start a new context for object registering.
+
+ Each context is tracked with a dict inside a list. The last list
+ element is the currently executing context. Each context dict is keyed
+ by the id of the StackedObjectProxy instance being proxied, the value
+ is a tuple of the StackedObjectProxy instance and the object being
+ tracked.
+
+ """
+ def __init__(self):
+ """Create a new Registry object
+
+ ``prepare`` must still be called before this Registry object can be
+ used to register objects.
+
+ """
+ self.reglist = []
+
+ def prepare(self):
+ """Used to create a new registry context
+
+ Anytime a new RegistryManager is called, ``prepare`` needs to be
+ called on the existing Registry object. This sets up a new context
+ for registering objects.
+
+ """
+ self.reglist.append({})
+
+ def register(self, stacked, obj):
+ """Register an object with a StackedObjectProxy"""
+ myreglist = self.reglist[-1]
+ stacked_id = id(stacked)
+ if stacked_id in myreglist:
+ stacked._pop_object(myreglist[stacked_id][1])
+ del myreglist[stacked_id]
+ stacked._push_object(obj)
+ myreglist[stacked_id] = (stacked, obj)
+
+ def multiregister(self, stacklist):
+ """Register a list of tuples
+
+ Similar call semantics as register, except this registers
+ multiple objects at once.
+
+ Example::
+
+ registry.multiregister([(sop, obj), (anothersop, anotherobj)])
+
+ """
+ myreglist = self.reglist[-1]
+ for stacked, obj in stacklist:
+ stacked_id = id(stacked)
+ if stacked_id in myreglist:
+ stacked._pop_object(myreglist[stacked_id][1])
+ del myreglist[stacked_id]
+ stacked._push_object(obj)
+ myreglist[stacked_id] = (stacked, obj)
+
+ # Replace now does the same thing as register
+ replace = register
+
def cleanup(self):
    """Remove all objects from all StackedObjectProxy instances that
    were tracked at this Registry context"""
    # Pop every proxied object registered in the current (innermost)
    # context off its proxy, then discard the context itself.
    # NOTE: ``itervalues`` ties this to Python 2.
    for stacked, obj in self.reglist[-1].itervalues():
        stacked._pop_object(obj)
    self.reglist.pop()
+
class RegistryManager(object):
    """Creates and maintains a Registry context

    RegistryManager creates a new registry context for the registration of
    StackedObjectProxy instances. Multiple RegistryManager's can be in a
    WSGI stack and will manage the context so that the StackedObjectProxies
    always proxy to the proper object.

    The object being registered can be any object sub-class, list, or dict.

    Registering objects is done inside a WSGI application under the
    RegistryManager instance, using the ``environ['paste.registry']``
    object which is a Registry instance.

    """
    def __init__(self, application, streaming=False):
        # application: the WSGI application being wrapped.
        # streaming: when true, clean-up is deferred until the response
        # iterator is exhausted (see ``streaming_iter``) rather than
        # happening as soon as the application call returns.
        self.application = application
        self.streaming = streaming

    def __call__(self, environ, start_response):
        # WSGI entry point: push a new registration context for this
        # request, run the wrapped application, then pop the context.
        app_iter = None
        reg = environ.setdefault('paste.registry', Registry())
        reg.prepare()
        if self.streaming:
            return self.streaming_iter(reg, environ, start_response)

        try:
            app_iter = self.application(environ, start_response)
        except Exception, e:
            # Regardless of if the content is an iterable, generator, list
            # or tuple, we clean-up right now. If its an iterable/generator
            # care should be used to ensure the generator has its own ref
            # to the actual object
            if environ.get('paste.evalexception'):
                # EvalException is present in the WSGI stack
                expected = False
                for expect in environ.get('paste.expected_exceptions', []):
                    if isinstance(e, expect):
                        expected = True
                if not expected:
                    # An unexpected exception: save state for EvalException
                    restorer.save_registry_state(environ)
            reg.cleanup()
            raise
        except:
            # Non-Exception raises (e.g. old-style string exceptions,
            # SystemExit/KeyboardInterrupt on Python 2).
            # Save state for EvalException if it's present
            if environ.get('paste.evalexception'):
                restorer.save_registry_state(environ)
            reg.cleanup()
            raise
        else:
            reg.cleanup()

        return app_iter

    def streaming_iter(self, reg, environ, start_response):
        # Generator variant of __call__: the registry context stays
        # alive while the response is iterated and is cleaned up only
        # once the wrapped iterable is exhausted (or raises).
        try:
            for item in self.application(environ, start_response):
                yield item
        except Exception, e:
            # Regardless of if the content is an iterable, generator, list
            # or tuple, we clean-up right now. If its an iterable/generator
            # care should be used to ensure the generator has its own ref
            # to the actual object
            if environ.get('paste.evalexception'):
                # EvalException is present in the WSGI stack
                expected = False
                for expect in environ.get('paste.expected_exceptions', []):
                    if isinstance(e, expect):
                        expected = True
                if not expected:
                    # An unexpected exception: save state for EvalException
                    restorer.save_registry_state(environ)
            reg.cleanup()
            raise
        except:
            # Save state for EvalException if it's present
            if environ.get('paste.evalexception'):
                restorer.save_registry_state(environ)
            reg.cleanup()
            raise
        else:
            reg.cleanup()
+
+
class StackedObjectRestorer(object):
    """Track StackedObjectProxies and their proxied objects for automatic
    restoration within EvalException's interactive debugger.

    An instance of this class tracks all StackedObjectProxy state in existence
    when unexpected exceptions are raised by WSGI applications housed by
    EvalException and RegistryManager. Like EvalException, this information is
    stored for the life of the process.

    When an unexpected exception occurs and EvalException is present in the
    WSGI stack, save_registry_state is intended to be called to store the
    Registry state and enable automatic restoration on all currently registered
    StackedObjectProxies.

    With restoration enabled, those StackedObjectProxies' _current_obj
    (overwritten by _current_obj_restoration) method's strategy is modified:
    it will return its appropriate proxied object from the restorer when
    a restoration context is active in the current thread.

    The StackedObjectProxies' _push/pop_object methods strategies are also
    changed: they no-op when a restoration context is active in the current
    thread (because the pushing/popping work is all handled by the
    Registry/restorer).

    The request's Registry objects' reglists are restored from the restorer
    when a restoration context begins, enabling the Registry methods to work
    while their changes are tracked by the restorer.

    The overhead of enabling restoration is negligible (another threadlocal
    access for the changed StackedObjectProxy methods) for normal use outside
    of a restoration context, but worth mentioning when combined with
    StackedObjectProxies normal overhead. Once enabled it does not turn off,
    however:

    o Enabling restoration only occurs after an unexpected exception is
    detected. The server is likely to be restarted shortly after the exception
    is raised to fix the cause

    o StackedObjectRestorer is only enabled when EvalException is enabled (not
    on a production server) and RegistryManager exists in the middleware
    stack"""
    def __init__(self):
        # Registries and their saved reglists by request_id
        self.saved_registry_states = {}
        # Thread-local carrying a ``request_id`` attribute while a
        # restoration context is active (see restoration_begin/_end).
        self.restoration_context_id = threadinglocal.local()

    def save_registry_state(self, environ):
        """Save the state of this request's Registry (if it hasn't already been
        saved) to the saved_registry_states dict, keyed by the request's unique
        identifier"""
        registry = environ.get('paste.registry')
        if not registry or not len(registry.reglist) or \
                self.get_request_id(environ) in self.saved_registry_states:
            # No Registry, no state to save, or this request's state has
            # already been saved
            return

        # A shallow copy of the reglist is kept; the context dicts are
        # shared with the live Registry.
        self.saved_registry_states[self.get_request_id(environ)] = \
            (registry, registry.reglist[:])

        # Tweak the StackedObjectProxies we want to save state for -- change
        # their methods to act differently when a restoration context is active
        # in the current thread
        for reglist in registry.reglist:
            for stacked, obj in reglist.itervalues():
                self.enable_restoration(stacked)

    def get_saved_proxied_obj(self, stacked, request_id):
        """Retrieve the saved object proxied by the specified
        StackedObjectProxy for the request identified by request_id"""
        # All state for the request identified by request_id
        reglist = self.saved_registry_states[request_id][1]

        # The top of the stack was current when the exception occurred
        stack_level = len(reglist) - 1
        stacked_id = id(stacked)
        while True:
            if stack_level < 0:
                # Nothing registered: Call _current_obj_orig to raise a
                # TypeError
                return stacked._current_obj_orig()
            context = reglist[stack_level]
            if stacked_id in context:
                break
            # This StackedObjectProxy may not have been registered by the
            # RegistryManager that was active when the exception was raised --
            # continue searching down the stack until it's found
            stack_level -= 1
        return context[stacked_id][1]

    def enable_restoration(self, stacked):
        """Replace the specified StackedObjectProxy's methods with their
        respective restoration versions.

        _current_obj_restoration forces recovery of the saved proxied object
        when a restoration context is active in the current thread.

        _push/pop_object_restoration avoid pushing/popping data
        (pushing/popping is only done at the Registry level) when a restoration
        context is active in the current thread"""
        if '_current_obj_orig' in stacked.__dict__:
            # Restoration already enabled
            return

        # Keep the originals under *_orig and shadow the class methods
        # with the instance-level restoration variants.
        for func_name in ('_current_obj', '_push_object', '_pop_object'):
            orig_func = getattr(stacked, func_name)
            restoration_func = getattr(stacked, func_name + '_restoration')
            stacked.__dict__[func_name + '_orig'] = orig_func
            stacked.__dict__[func_name] = restoration_func

    def get_request_id(self, environ):
        """Return a unique identifier for the current request"""
        # Imported lazily to avoid a circular import with the
        # evalexception middleware.
        from paste.evalexception.middleware import get_debug_count
        return get_debug_count(environ)

    def restoration_begin(self, request_id):
        """Enable a restoration context in the current thread for the specified
        request_id"""
        if request_id in self.saved_registry_states:
            # Restore the old Registry object's state
            registry, reglist = self.saved_registry_states[request_id]
            registry.reglist = reglist

        self.restoration_context_id.request_id = request_id

    def restoration_end(self):
        """Register a restoration context as finished, if one exists"""
        try:
            del self.restoration_context_id.request_id
        except AttributeError:
            pass

    def in_restoration(self):
        """Determine if a restoration context is active for the current thread.
        Returns the request_id it's active for if so, otherwise False"""
        return getattr(self.restoration_context_id, 'request_id', False)
+
# Module-global restorer shared by RegistryManager and EvalException.
restorer = StackedObjectRestorer()


# Paste Deploy entry point
def make_registry_manager(app, global_conf):
    # ``global_conf`` is accepted (and unused) to match the Paste Deploy
    # filter factory signature.
    return RegistryManager(app)

make_registry_manager.__doc__ = RegistryManager.__doc__
diff --git a/paste/reloader.py b/paste/reloader.py
new file mode 100644
index 0000000..a0e3850
--- /dev/null
+++ b/paste/reloader.py
@@ -0,0 +1,178 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+A file monitor and server restarter.
+
+Use this like:
+
+.. code-block:: Python
+
+ import reloader
+ reloader.install()
+
+Then make sure your server is installed with a shell script like::
+
+ err=3
+ while test "$err" -eq 3 ; do
+ python server.py
+ err="$?"
+ done
+
+or is run from this .bat file (if you use Windows)::
+
+ @echo off
+ :repeat
+ python server.py
+ if %errorlevel% == 3 goto repeat
+
+or run a monitoring process in Python (``paster serve --reload`` does
+this).
+
+Use the ``watch_file(filename)`` function to cause a reload/restart for
+other non-Python files (e.g., configuration files). If you have
+a dynamic set of files that grows over time you can use something like::
+
+ def watch_config_files():
+ return CONFIG_FILE_CACHE.keys()
+ paste.reloader.add_file_callback(watch_config_files)
+
+Then every time the reloader polls files it will call
+``watch_config_files`` and check all the filenames it returns.
+"""
+
+import os
+import sys
+import time
+import threading
+import traceback
+from paste.util.classinstance import classinstancemethod
+
def install(poll_interval=1):
    """
    Install the reloading monitor.

    Starts a daemon thread that polls the files of all loaded modules
    (plus any extra watched files) every ``poll_interval`` seconds;
    when a change is detected the process exits with status 3 so that
    a wrapper script can restart it.
    """
    mon = Monitor(poll_interval=poll_interval)
    t = threading.Thread(target=mon.periodic_reload)
    # Daemon thread so the monitor never keeps the process alive.
    t.setDaemon(True)
    t.start()
+
class Monitor(object):
    """Polls watched files for modification and signals a restart.

    ``periodic_reload`` loops forever; when ``check_reload`` reports a
    change it calls ``os._exit(3)`` so a wrapper can restart the
    process.
    """

    # Class-level registries shared by every Monitor; the
    # classinstancemethod helpers below update either one instance or,
    # when called on the class, all instances plus these globals.
    instances = []
    global_extra_files = []
    global_file_callbacks = []

    def __init__(self, poll_interval):
        # Known file -> mtime snapshots from previous polls.
        self.module_mtimes = {}
        self.keep_running = True
        self.poll_interval = poll_interval
        self.extra_files = list(self.global_extra_files)
        self.instances.append(self)
        self.file_callbacks = list(self.global_file_callbacks)

    def periodic_reload(self):
        """Poll until a change is seen, then exit with status 3."""
        while True:
            if not self.check_reload():
                # use os._exit() here and not sys.exit() since within a
                # thread sys.exit() just closes the given thread and
                # won't kill the process; note os._exit does not call
                # any atexit callbacks, nor does it do finally blocks,
                # flush open files, etc. In otherwords, it is rude.
                os._exit(3)
                break
            time.sleep(self.poll_interval)

    def check_reload(self):
        """Return False if any watched file changed since the last poll."""
        filenames = list(self.extra_files)
        # Callbacks contribute dynamic filename lists; a broken callback
        # is reported but does not stop the poll.
        for file_callback in self.file_callbacks:
            try:
                filenames.extend(file_callback())
            except:
                print >> sys.stderr, "Error calling paste.reloader callback %r:" % file_callback
                traceback.print_exc()
        for module in sys.modules.values():
            try:
                filename = module.__file__
            except (AttributeError, ImportError), exc:
                continue
            if filename is not None:
                filenames.append(filename)
        for filename in filenames:
            try:
                stat = os.stat(filename)
                if stat:
                    mtime = stat.st_mtime
                else:
                    mtime = 0
            except (OSError, IOError):
                # File vanished between listing and stat; ignore it.
                continue
            # For compiled files, also consider the source's mtime.
            if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
                mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
            elif filename.endswith('$py.class') and \
                    os.path.exists(filename[:-9] + '.py'):
                mtime = max(os.stat(filename[:-9] + '.py').st_mtime, mtime)
            if not self.module_mtimes.has_key(filename):
                # First sighting: record the baseline mtime.
                self.module_mtimes[filename] = mtime
            elif self.module_mtimes[filename] < mtime:
                print >> sys.stderr, (
                    "%s changed; reloading..." % filename)
                return False
        return True

    def watch_file(self, cls, filename):
        """Watch the named file for changes"""
        filename = os.path.abspath(filename)
        # classinstancemethod: ``self`` is None when invoked on the
        # class, in which case the file is added to every existing
        # instance and to the global list for future instances.
        if self is None:
            for instance in cls.instances:
                instance.watch_file(filename)
            cls.global_extra_files.append(filename)
        else:
            self.extra_files.append(filename)

    watch_file = classinstancemethod(watch_file)

    def add_file_callback(self, cls, callback):
        """Add a callback -- a function that takes no parameters -- that will
        return a list of filenames to watch for changes."""
        if self is None:
            for instance in cls.instances:
                instance.add_file_callback(callback)
            cls.global_file_callbacks.append(callback)
        else:
            self.file_callbacks.append(callback)

    add_file_callback = classinstancemethod(add_file_callback)
+
# On Jython, prefer an in-process interpreter restart when the
# experimental _systemrestart module is available.
if sys.platform.startswith('java'):
    try:
        from _systemrestart import SystemRestart
    except ImportError:
        pass
    else:
        class JythonMonitor(Monitor):

            """
            Monitor that utilizes Jython's special
            ``_systemrestart.SystemRestart`` exception.

            When raised from the main thread it causes Jython to reload
            the interpreter in the existing Java process (avoiding
            startup time).

            Note that this functionality of Jython is experimental and
            may change in the future.
            """

            def periodic_reload(self):
                # Unlike the base class, raise rather than os._exit()
                # so Jython can restart in place.
                while True:
                    if not self.check_reload():
                        raise SystemRestart()
                    time.sleep(self.poll_interval)

# Module-level conveniences; calling these on the class updates the
# global lists and every existing Monitor instance.
watch_file = Monitor.watch_file
add_file_callback = Monitor.add_file_callback
diff --git a/paste/request.py b/paste/request.py
new file mode 100644
index 0000000..9af494d
--- /dev/null
+++ b/paste/request.py
@@ -0,0 +1,411 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Ian Bicking and contributors
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+This module provides helper routines with work directly on a WSGI
+environment to solve common requirements.
+
+ * get_cookies(environ)
+ * parse_querystring(environ)
+ * parse_formvars(environ, include_get_vars=True)
+ * construct_url(environ, with_query_string=True, with_path_info=True,
+ script_name=None, path_info=None, querystring=None)
+ * path_info_split(path_info)
+ * path_info_pop(environ)
+ * resolve_relative_url(url, environ)
+
+"""
+import cgi
+from Cookie import SimpleCookie, CookieError
+from StringIO import StringIO
+import urlparse
+import urllib
+try:
+ from UserDict import DictMixin
+except ImportError:
+ from paste.util.UserDict24 import DictMixin
+from paste.util.multidict import MultiDict
+
+__all__ = ['get_cookies', 'get_cookie_dict', 'parse_querystring',
+ 'parse_formvars', 'construct_url', 'path_info_split',
+ 'path_info_pop', 'resolve_relative_url', 'EnvironHeaders']
+
def get_cookies(environ):
    """
    Return the request's cookies as a ``SimpleCookie`` object
    (dictionary-like).

    The parsed object is cached in ``environ['paste.cookies']``
    alongside the raw ``HTTP_COOKIE`` header it was parsed from, so a
    second call during the same request re-parses only if the header
    changed.  Malformed cookie data is silently ignored.
    """
    header = environ.get('HTTP_COOKIE', '')
    # 'in' instead of the deprecated (Python 3-removed) dict.has_key()
    if 'paste.cookies' in environ:
        cookies, check_header = environ['paste.cookies']
        if check_header == header:
            return cookies
    cookies = SimpleCookie()
    try:
        cookies.load(header)
    except CookieError:
        # Bad cookie data should not fail the whole request.
        pass
    environ['paste.cookies'] = (cookies, header)
    return cookies
+
def get_cookie_dict(environ):
    """Return a *plain* dictionary of cookies as found in the request.

    Unlike ``get_cookies`` this returns a dictionary, not a
    ``SimpleCookie`` object.  For incoming cookies a dictionary fully
    represents the information.  Like ``get_cookies`` this caches (in
    ``environ['paste.cookies.dict']``) and checks the cache against the
    raw ``HTTP_COOKIE`` header.
    """
    header = environ.get('HTTP_COOKIE')
    if not header:
        # No cookie header at all; nothing to parse or cache.
        return {}
    # 'in' instead of the deprecated (Python 3-removed) dict.has_key()
    if 'paste.cookies.dict' in environ:
        cookies, check_header = environ['paste.cookies.dict']
        if check_header == header:
            return cookies
    cookies = SimpleCookie()
    try:
        cookies.load(header)
    except CookieError:
        # Bad cookie data should not fail the whole request.
        pass
    result = {}
    for name in cookies:
        result[name] = cookies[name].value
    environ['paste.cookies.dict'] = (result, header)
    return result
+
def parse_querystring(environ):
    """
    Parse QUERY_STRING into a ``[(name, value)]`` list, caching the
    result in ``environ['paste.parsed_querystring']`` for the duration
    of the request.

    Passing the result to ``dict()`` loses repeated keys (only the
    last value survives).
    """
    source = environ.get('QUERY_STRING', '')
    if not source:
        return []
    cached = environ.get('paste.parsed_querystring')
    if cached is not None:
        parsed, check_source = cached
        if check_source == source:
            return parsed
    parsed = cgi.parse_qsl(source, keep_blank_values=True,
                           strict_parsing=False)
    environ['paste.parsed_querystring'] = (parsed, source)
    return parsed
+
def parse_dict_querystring(environ):
    """Parse QUERY_STRING like ``parse_querystring`` but return a
    MultiDict, caching the result in
    ``environ['paste.parsed_dict_querystring']``.

    Example::

        >>> environ = {'QUERY_STRING': 'day=Monday&user=fred&user=jane'}
        >>> parsed = parse_dict_querystring(environ)

        >>> parsed['day']
        'Monday'
        >>> parsed['user']
        'fred'
        >>> parsed.getall('user')
        ['fred', 'jane']

    """
    source = environ.get('QUERY_STRING', '')
    if not source:
        return MultiDict()
    cached = environ.get('paste.parsed_dict_querystring')
    if cached is not None:
        multi, check_source = cached
        if check_source == source:
            return multi
    pairs = cgi.parse_qsl(source, keep_blank_values=True,
                          strict_parsing=False)
    multi = MultiDict(pairs)
    environ['paste.parsed_dict_querystring'] = (multi, source)
    return multi
+
def parse_formvars(environ, include_get_vars=True):
    """Parses the request, returning a MultiDict of form variables.

    If ``include_get_vars`` is true then GET (query string) variables
    will also be folded into the MultiDict.

    All values should be strings, except for file uploads which are
    left as ``FieldStorage`` instances.

    If the request was not a normal form request (e.g., a POST with an
    XML body) then ``environ['wsgi.input']`` won't be read.
    """
    source = environ['wsgi.input']
    if 'paste.parsed_formvars' in environ:
        parsed, check_source = environ['paste.parsed_formvars']
        if check_source == source:
            # Cache hit: wsgi.input was already consumed for this request.
            if include_get_vars:
                parsed.update(parse_querystring(environ))
            return parsed
    # @@: Shouldn't bother FieldStorage parsing during GET/HEAD and
    # fake_out_cgi requests
    type = environ.get('CONTENT_TYPE', '').lower()
    if ';' in type:
        # Discard parameters such as charset=... from the content type.
        type = type.split(';', 1)[0]
    fake_out_cgi = type not in ('', 'application/x-www-form-urlencoded',
                                'multipart/form-data')
    # FieldStorage assumes a default CONTENT_LENGTH of -1, but a
    # default of 0 is better:
    if not environ.get('CONTENT_LENGTH'):
        environ['CONTENT_LENGTH'] = '0'
    # Prevent FieldStorage from parsing QUERY_STRING during GET/HEAD
    # requests
    old_query_string = environ.get('QUERY_STRING','')
    environ['QUERY_STRING'] = ''
    if fake_out_cgi:
        # Non-form content type: hand FieldStorage an empty body so the
        # real wsgi.input is left unread.
        input = StringIO('')
        old_content_type = environ.get('CONTENT_TYPE')
        old_content_length = environ.get('CONTENT_LENGTH')
        environ['CONTENT_LENGTH'] = '0'
        environ['CONTENT_TYPE'] = ''
    else:
        input = environ['wsgi.input']
    fs = cgi.FieldStorage(fp=input,
                          environ=environ,
                          keep_blank_values=1)
    # Restore the environ keys temporarily blanked out above.
    environ['QUERY_STRING'] = old_query_string
    if fake_out_cgi:
        environ['CONTENT_TYPE'] = old_content_type
        environ['CONTENT_LENGTH'] = old_content_length
    formvars = MultiDict()
    if isinstance(fs.value, list):
        for name in fs.keys():
            values = fs[name]
            if not isinstance(values, list):
                values = [values]
            for value in values:
                # Plain fields collapse to their string value; file
                # uploads (with a filename) stay as FieldStorage.
                if not value.filename:
                    value = value.value
                formvars.add(name, value)
    environ['paste.parsed_formvars'] = (formvars, source)
    if include_get_vars:
        formvars.update(parse_querystring(environ))
    return formvars
+
def construct_url(environ, with_query_string=True, with_path_info=True,
                  script_name=None, path_info=None, querystring=None):
    """Reconstruct the request URL from a WSGI environment.

    SCRIPT_NAME, PATH_INFO and the query string may each be overridden
    via the keyword arguments.  ``HTTP_HOST`` is preferred over
    SERVER_NAME/SERVER_PORT, default ports (80/443) are omitted, and
    the path components are URL-quoted.
    """
    scheme = environ['wsgi.url_scheme']
    parts = [scheme, '://']

    host = environ.get('HTTP_HOST')
    if host:
        port = None
        if ':' in host:
            host, port = host.split(':', 1)
            # Drop the port when it is the scheme's default.
            if scheme == 'https':
                if port == '443':
                    port = None
            elif scheme == 'http':
                if port == '80':
                    port = None
        parts.append(host)
        if port:
            parts.append(':%s' % port)
    else:
        parts.append(environ['SERVER_NAME'])
        server_port = environ['SERVER_PORT']
        default_port = (scheme == 'https') and '443' or '80'
        if server_port != default_port:
            parts.append(':' + server_port)

    if script_name is None:
        script_name = environ.get('SCRIPT_NAME', '')
    parts.append(urllib.quote(script_name))
    if with_path_info:
        if path_info is None:
            path_info = environ.get('PATH_INFO', '')
        parts.append(urllib.quote(path_info))
    if with_query_string:
        if querystring is None:
            if environ.get('QUERY_STRING'):
                parts.append('?' + environ['QUERY_STRING'])
        elif querystring:
            parts.append('?' + querystring)
    return ''.join(parts)
+
def resolve_relative_url(url, environ):
    """
    Resolve ``url`` relative to the location represented by the WSGI
    environment -- handy for building redirect targets.  An already
    absolute ``url`` is (intentionally) returned unchanged, per
    ``urljoin`` semantics.
    """
    base = construct_url(environ, with_query_string=False)
    return urlparse.urljoin(base, url)
+
def path_info_split(path_info):
    """
    Split the leading segment off of a PATH_INFO string.

    Returns ``(first_part, rest_of_path)``: ``first_part`` is None for
    an empty path, '' when the path is just '/', or the first segment
    (containing no slashes); ``rest_of_path`` is '' or a string
    starting with '/'.
    """
    if not path_info:
        return None, ''
    assert path_info.startswith('/'), (
        "PATH_INFO should start with /: %r" % path_info)
    stripped = path_info.lstrip('/')
    first, slash, rest = stripped.partition('/')
    if slash:
        return first, '/' + rest
    return first, ''
+
def path_info_pop(environ):
    """
    Shift the next segment of PATH_INFO onto SCRIPT_NAME and return it.

    Returns None when PATH_INFO is empty, '' when the path was only
    slashes, otherwise the popped segment.

    For instance::

        >>> def call_it(script_name, path_info):
        ...     env = {'SCRIPT_NAME': script_name, 'PATH_INFO': path_info}
        ...     result = path_info_pop(env)
        ...     print 'SCRIPT_NAME=%r; PATH_INFO=%r; returns=%r' % (
        ...         env['SCRIPT_NAME'], env['PATH_INFO'], result)
        >>> call_it('/foo', '/bar')
        SCRIPT_NAME='/foo/bar'; PATH_INFO=''; returns='bar'
        >>> call_it('/foo/bar', '')
        SCRIPT_NAME='/foo/bar'; PATH_INFO=''; returns=None
        >>> call_it('/foo/bar', '/')
        SCRIPT_NAME='/foo/bar/'; PATH_INFO=''; returns=''
        >>> call_it('', '/1/2/3')
        SCRIPT_NAME='/1'; PATH_INFO='/2/3'; returns='1'
        >>> call_it('', '//1/2')
        SCRIPT_NAME='//1'; PATH_INFO='/2'; returns='1'

    """
    path = environ.get('PATH_INFO', '')
    if not path:
        return None
    # Move any leading slashes onto SCRIPT_NAME one at a time.
    while path.startswith('/'):
        environ['SCRIPT_NAME'] += '/'
        path = path[1:]
    segment, slash, rest = path.partition('/')
    environ['SCRIPT_NAME'] += segment
    if slash:
        environ['PATH_INFO'] = '/' + rest
    else:
        environ['PATH_INFO'] = ''
    return segment
+
# CGI variables whose header name cannot be derived mechanically from
# the HTTP_*-style key (used by parse_headers below).
_parse_headers_special = {
    # This is a Zope convention, but we'll allow it here:
    'HTTP_CGI_AUTHORIZATION': 'Authorization',
    # Content-Length/Content-Type appear in the environ without the
    # HTTP_ prefix.
    'CONTENT_LENGTH': 'Content-Length',
    'CONTENT_TYPE': 'Content-Type',
    }
+
def parse_headers(environ):
    """
    Yield ``(header_name, value)`` pairs reconstructed from the
    CGI-style keys of a WSGI environment (``HTTP_*`` plus the special
    content headers).
    """
    # @@: Maybe should parse out comma-separated headers?
    for cgi_var, value in environ.iteritems():
        special = _parse_headers_special.get(cgi_var)
        if special is not None:
            yield special, value
        elif cgi_var.startswith('HTTP_'):
            yield cgi_var[5:].title().replace('_', '-'), value
+
class EnvironHeaders(DictMixin):
    """Dictionary-style, single-valued view of the HTTP headers held in
    a WSGI environment.

    Keeps no state of its own: every operation translates the header
    name into its CGI key (``HTTP_*``, or the bare
    CONTENT_TYPE/CONTENT_LENGTH) and reads or writes ``self.environ``
    directly.  Because a CGI environment can hold only one value per
    key, this mapping is single-valued (unlike outgoing headers).
    """

    def __init__(self, environ):
        self.environ = environ

    def _trans_name(self, name):
        # Header name -> CGI variable.
        key = 'HTTP_' + name.replace('-', '_').upper()
        if key in ('HTTP_CONTENT_LENGTH', 'HTTP_CONTENT_TYPE'):
            # The two content headers live without the HTTP_ prefix.
            key = key[5:]
        return key

    def _trans_key(self, key):
        # CGI variable -> header name; None for non-header keys.
        if key == 'CONTENT_TYPE':
            return 'Content-Type'
        if key == 'CONTENT_LENGTH':
            return 'Content-Length'
        if key.startswith('HTTP_'):
            return key[5:].replace('_', '-').title()
        return None

    def __getitem__(self, item):
        return self.environ[self._trans_name(item)]

    def __setitem__(self, item, value):
        # @@: Should this dictionary be writable at all?
        self.environ[self._trans_name(item)] = value

    def __delitem__(self, item):
        del self.environ[self._trans_name(item)]

    def __iter__(self):
        for key in self.environ:
            header = self._trans_key(key)
            if header is not None:
                yield header

    def keys(self):
        return [header for header in self]

    def __contains__(self, item):
        return self._trans_name(item) in self.environ
+
def _cgi_FieldStorage__repr__patch(self):
    """ monkey patch for FieldStorage.__repr__

    The stock FieldStorage.__repr__ includes ``self.value``, which for
    an uploaded file means reading the entire file contents just to
    build a repr.  This replacement omits the value whenever a file
    object is attached.
    """
    if self.file:
        return "FieldStorage(%r, %r)" % (self.name, self.filename)
    return "FieldStorage(%r, %r, %r)" % (
        self.name, self.filename, self.value)
+
+cgi.FieldStorage.__repr__ = _cgi_FieldStorage__repr__patch
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/paste/response.py b/paste/response.py
new file mode 100644
index 0000000..5ce0320
--- /dev/null
+++ b/paste/response.py
@@ -0,0 +1,240 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""Routines to generate WSGI responses"""
+
+############################################################
+## Headers
+############################################################
+import warnings
+
class HeaderDict(dict):

    """
    A dict subclass holding response headers under case-insensitive
    (and whitespace-stripped) keys.

    ``add(key, value)`` appends instead of replacing: a repeated key
    turns its entry into a list of values.  ``headeritems()`` flattens
    those lists back into the ``(name, value)`` pairs WSGI wants,
    stringifying every value.

    @@: Should that encoding be ISO-8859-1 or UTF-8? I'm not sure
    what the spec says.
    """

    def __getitem__(self, key):
        return dict.__getitem__(self, self.normalize(key))

    def __setitem__(self, key, value):
        dict.__setitem__(self, self.normalize(key), value)

    def __delitem__(self, key):
        dict.__delitem__(self, self.normalize(key))

    def __contains__(self, key):
        return dict.__contains__(self, self.normalize(key))

    has_key = __contains__

    def get(self, key, failobj=None):
        return dict.get(self, self.normalize(key), failobj)

    def setdefault(self, key, failobj=None):
        return dict.setdefault(self, self.normalize(key), failobj)

    def pop(self, key, *args):
        return dict.pop(self, self.normalize(key), *args)

    def update(self, other):
        # __setitem__ normalizes, so plain assignment suffices.
        for key in other:
            self[key] = other[key]

    def normalize(self, key):
        # Every internal key is a lower-cased, stripped string.
        return str(key).lower().strip()

    def add(self, key, value):
        key = self.normalize(key)
        if key not in self:
            self[key] = value
        elif isinstance(self[key], list):
            self[key].append(value)
        else:
            # Second value for this key: promote to a list.
            self[key] = [self[key], value]

    def headeritems(self):
        result = []
        for key, value in self.items():
            if isinstance(value, list):
                result.extend([(key, str(v)) for v in value])
            else:
                result.append((key, str(value)))
        return result

    #@classmethod
    def fromlist(cls, seq):
        headers = cls()
        for name, value in seq:
            headers.add(name, value)
        return headers

    fromlist = classmethod(fromlist)
+
def has_header(headers, name):
    """
    True if a header called ``name`` (compared case-insensitively)
    appears in the ``(name, value)`` list ``headers``.
    """
    target = name.lower()
    for key, value in headers:
        if key.lower() == target:
            return True
    return False
+
def header_value(headers, name):
    """
    Return the value of the header named ``name``, or None when it is
    absent.  Multiple occurrences are joined with ',', which is
    consistent with RFC 2616 section 4.2:

        It MUST be possible to combine the multiple header fields
        into one "field-name: field-value" pair, without changing
        the semantics of the message, by appending each subsequent
        field-value to the first, each separated by a comma.

    Note, however, that this concatenation is not safe for the
    original Netscape-style 'Set-Cookie' header, whose MSIE-era
    'expires' attribute itself contains a comma.
    """
    target = name.lower()
    found = [value for header, value in headers
             if header.lower() == target]
    if not found:
        return None
    return ','.join(found)
+
def remove_header(headers, name):
    """
    Delete every header named ``name`` (case-insensitive) from the
    list, in place.  Returns the value of the last one removed, or
    None if the header was not present.
    """
    target = name.lower()
    result = None
    index = 0
    while index < len(headers):
        if headers[index][0].lower() == target:
            result = headers[index][1]
            # Deleting shifts the list, so the index stays put.
            del headers[index]
        else:
            index += 1
    return result
+
def replace_header(headers, name, value):
    """
    Replace, in place, the single occurrence of header ``name`` with
    ``(name, value)`` (name lower-cased), returning the previous
    value; the header is appended if it was not present.  Unlike
    remove-then-append, the update of an existing header is a single
    in-place assignment, so no intermediate state is visible to other
    threads.

    Raises AssertionError when ``name`` occurs more than once --
    clearly one should not use this function with ``set-cookie`` or
    other names that may legitimately repeat.
    """
    name = name.lower()
    result = None
    # Track "seen" explicitly: the old ``assert not result`` /
    # ``if not result`` tests used truthiness, so an existing header
    # whose value was falsy (e.g. '') was appended a second time and
    # escaped duplicate detection.
    found = False
    for i in range(len(headers)):
        if headers[i][0].lower() == name:
            assert not found, "two values for the header '%s' found" % name
            found = True
            result = headers[i][1]
            headers[i] = (name, value)
    if not found:
        headers.append((name, value))
    return result
+
+
+############################################################
+## Deprecated methods
+############################################################
+
def error_body_response(error_code, message, __warn=True):
    """
    Returns a standard HTML response page for an HTTP error.
    **Note:** Deprecated
    """
    # __warn lets internal callers (error_response) suppress the
    # duplicate DeprecationWarning.
    if __warn:
        warnings.warn(
            'wsgilib.error_body_response is deprecated; use the '
            'wsgi_application method on an HTTPException object '
            'instead', DeprecationWarning, 2)
    return '''\
<html>
 <head>
  <title>%(error_code)s</title>
 </head>
 <body>
  <h1>%(error_code)s</h1>
  %(message)s
 </body>
</html>''' % {
    'error_code': error_code,
    'message': message,
    }
+
+
def error_response(environ, error_code, message,
                   debug_message=None, __warn=True):
    """
    Build the status, headers, and body of an HTML error response.

    Use like:

    .. code-block:: python

        status, headers, body = wsgilib.error_response(
            '301 Moved Permanently', 'Moved to <a href="%s">%s</a>'
            % (url, url))
        start_response(status, headers)
        return [body]

    When ``debug_message`` is given and ``paste.config`` has ``debug``
    set, the debug text is appended to the body as an HTML comment.

    **Note:** Deprecated
    """
    if __warn:
        warnings.warn(
            'wsgilib.error_response is deprecated; use the '
            'wsgi_application method on an HTTPException object '
            'instead', DeprecationWarning, 2)
    if debug_message and environ.get('paste.config', {}).get('debug'):
        message += '\n\n<!-- %s -->' % debug_message
    body = error_body_response(error_code, message, __warn=False)
    headers = [('content-type', 'text/html')]
    headers.append(('content-length', str(len(body))))
    return error_code, headers, body
+
def error_response_app(error_code, message, debug_message=None,
                       __warn=True):
    """
    Build a WSGI application that always emits the given error
    response.

    **Note:** Deprecated
    """
    if __warn:
        warnings.warn(
            'wsgilib.error_response_app is deprecated; use the '
            'wsgi_application method on an HTTPException object '
            'instead', DeprecationWarning, 2)
    def application(environ, start_response):
        # Per-request delegation; the deprecation warning above was
        # already issued once at construction time.
        status, headers, body = error_response(
            environ, error_code, message,
            debug_message=debug_message, __warn=False)
        start_response(status, headers)
        return [body]
    return application
diff --git a/paste/session.py b/paste/session.py
new file mode 100644
index 0000000..16a7739
--- /dev/null
+++ b/paste/session.py
@@ -0,0 +1,337 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Creates a session object in your WSGI environment.
+
+Use like:
+
.. code-block:: python
+
+ environ['paste.session.factory']()
+
+This will return a dictionary. The contents of this dictionary will
+be saved to disk when the request is completed. The session will be
+created when you first fetch the session dictionary, and a cookie will
be sent in that case. There's currently no way to use sessions without
+cookies, and there's no way to delete a session except to clear its
+data.
+
+@@: This doesn't do any locking, and may cause problems when a single
+session is accessed concurrently. Also, it loads and saves the
+session for each request, with no caching. Also, sessions aren't
+expired.
+"""
+
+from Cookie import SimpleCookie
+import time
+import random
+import os
+import datetime
+import threading
+import tempfile
+
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+from paste import wsgilib
+from paste import request
+
class SessionMiddleware(object):
    """WSGI middleware that publishes a lazy session factory.

    The factory appears as ``environ['paste.session.factory']``; calling
    it returns the session data dictionary.  A session cookie is only
    added to responses whose application actually created a session.
    """

    def __init__(self, application, global_conf=None, **factory_kw):
        # global_conf is accepted for paste.deploy compatibility but is
        # not used here; extra keywords are forwarded to SessionFactory.
        self.application = application
        self.factory_kw = factory_kw

    def __call__(self, environ, start_response):
        session_factory = SessionFactory(environ, **self.factory_kw)
        environ['paste.session.factory'] = session_factory
        remember_headers = []

        def session_start_response(status, headers, exc_info=None):
            if not session_factory.created:
                # No session yet: remember the status/headers so they can
                # be replayed with a cookie if the app creates the
                # session later (after start_response has been called).
                remember_headers[:] = [status, headers]
                return start_response(status, headers)
            headers.append(session_factory.set_cookie_header())
            return start_response(status, headers, exc_info)

        app_iter = self.application(environ, session_start_response)
        def start():
            if session_factory.created and remember_headers:
                # Tricky bastard used the session after start_response
                status, headers = remember_headers
                headers.append(session_factory.set_cookie_header())
                exc = ValueError(
                    "You cannot get the session after content from the "
                    "app_iter has been returned")
                start_response(status, headers, (exc.__class__, exc, None))
        def close():
            # Persist the session only if the application fetched it.
            if session_factory.used:
                session_factory.close()
        return wsgilib.add_start_close(app_iter, start, close)
+
+
class SessionFactory(object):
    """Lazily loads or creates the session for a single request.

    Instances are placed into the WSGI environ by SessionMiddleware;
    calling the instance returns the session's data dictionary,
    creating a new session (and flagging that a cookie must be set)
    on first use.
    """

    def __init__(self, environ, cookie_name='_SID_',
                 session_class=None,
                 session_expiration=60*12, # in minutes
                 **session_class_kw):
        # ``created``: a new session (needing a cookie) was made this
        # request; ``used``: the application fetched the session at all.
        self.created = False
        self.used = False
        self.environ = environ
        self.cookie_name = cookie_name
        self.session = None
        self.session_class = session_class or FileSession
        self.session_class_kw = session_class_kw

        self.expiration = session_expiration

    def __call__(self):
        """Return the session data dict, creating the session if needed."""
        self.used = True
        if self.session is not None:
            return self.session.data()
        cookies = request.get_cookies(self.environ)
        session = None
        # ``in`` instead of the deprecated dict.has_key()
        if self.cookie_name in cookies:
            self.sid = cookies[self.cookie_name].value
            try:
                session = self.session_class(self.sid, create=False,
                                             **self.session_class_kw)
            except KeyError:
                # Invalid SID
                pass
        if session is None:
            self.created = True
            self.sid = self.make_sid()
            session = self.session_class(self.sid, create=True,
                                         **self.session_class_kw)
        session.clean_up()
        self.session = session
        return session.data()

    def has_session(self):
        """True if a session is loaded or a session cookie is present."""
        if self.session is not None:
            return True
        cookies = request.get_cookies(self.environ)
        if self.cookie_name in cookies:
            return True
        return False

    def make_sid(self):
        """Build a session id of the form YYYYMMDDHHMMSS-<32 hex chars>."""
        # @@: need better algorithm -- the timestamp prefix makes ids
        # predictable; do not treat these as unguessable tokens.
        return (''.join(['%02d' % x for x in time.localtime(time.time())[:6]])
                + '-' + self.unique_id())

    def unique_id(self, for_object=None):
        """
        Generates an opaque, identifier string that is practically
        guaranteed to be unique.  If an object is passed, then its
        id() is incorporated into the generation.  Relies on md5 and
        returns a 32 character long string.
        """
        r = [time.time(), random.random()]
        if hasattr(os, 'times'):
            r.append(os.times())
        if for_object is not None:
            r.append(id(for_object))
        # Encode explicitly: md5() requires bytes on modern Pythons, and
        # the repr here is always ASCII, so this is safe on Python 2 too.
        md5_hash = md5(str(r).encode('utf-8'))
        try:
            return md5_hash.hexdigest()
        except AttributeError:
            # Older versions of Python didn't have hexdigest, so we'll
            # do it manually
            hexdigest = []
            for char in md5_hash.digest():
                hexdigest.append('%02x' % ord(char))
            return ''.join(hexdigest)

    def set_cookie_header(self):
        """Return the ('Set-Cookie', value) header for this session."""
        c = SimpleCookie()
        c[self.cookie_name] = self.sid
        c[self.cookie_name]['path'] = '/'

        gmt_expiration_time = time.gmtime(time.time() + (self.expiration * 60))
        c[self.cookie_name]['expires'] = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT", gmt_expiration_time)

        name, value = str(c).split(': ', 1)
        return (name, value)

    def close(self):
        """Flush the loaded session (if any) to its backing store."""
        if self.session is not None:
            self.session.close()
+
+
# Module-wide state for FileSession's background expiry sweep: when the
# last sweep ran, whether a sweep is currently running, and how often
# a new sweep may be started.
last_cleanup = None
cleaning_up = False
cleanup_cycle = datetime.timedelta(seconds=15*60) #15 min
+
class FileSession(object):
    """Stores one session as a pickle file named after the session id.

    Raises KeyError for an empty sid, or a missing file when ``create``
    is false.  Data is loaded lazily by data() and written back by
    close(); a session whose dict ends up empty has its file removed.
    """

    def __init__(self, sid, create=False, session_file_path=tempfile.gettempdir(),
                 chmod=None,
                 expiration=2880, # in minutes: 48 hours
                 ):
        if chmod and isinstance(chmod, basestring):
            # Allow octal strings (e.g. '660') straight from config files.
            chmod = int(chmod, 8)
        self.chmod = chmod
        if not sid:
            # Invalid...
            raise KeyError
        self.session_file_path = session_file_path
        self.sid = sid
        if not create:
            if not os.path.exists(self.filename()):
                raise KeyError
        self._data = None

        self.expiration = expiration


    def filename(self):
        """Full path of this session's pickle file."""
        return os.path.join(self.session_file_path, self.sid)

    def data(self):
        """Return (loading lazily if necessary) the session's data dict."""
        if self._data is not None:
            return self._data
        if os.path.exists(self.filename()):
            # ``with`` guarantees the handle closes even if unpickling fails.
            with open(self.filename(), 'rb') as f:
                self._data = cPickle.load(f)
        else:
            self._data = {}
        return self._data

    def close(self):
        """Write the session back to disk; delete the file if it is empty."""
        if self._data is not None:
            filename = self.filename()
            exists = os.path.exists(filename)
            if not self._data:
                if exists:
                    os.unlink(filename)
            else:
                with open(filename, 'wb') as f:
                    cPickle.dump(self._data, f)
                if not exists and self.chmod:
                    # Apply the requested mode only to newly created files.
                    os.chmod(filename, self.chmod)

    def _clean_up(self):
        # Runs in a background thread; always clear the in-progress flag.
        global cleaning_up
        try:
            exp_time = datetime.timedelta(seconds=self.expiration*60)
            now = datetime.datetime.now()

            #Open every session and check that it isn't too old
            for root, dirs, files in os.walk(self.session_file_path):
                for f in files:
                    self._clean_up_file(f, exp_time=exp_time, now=now)
        finally:
            cleaning_up = False

    def _clean_up_file(self, f, exp_time, now):
        # Session filenames look like YYYYMMDDHHMMSS-<hex>; anything
        # that does not parse that way is left alone.
        t = f.split("-")
        if len(t) != 2:
            return
        t = t[0]
        try:
            sess_time = datetime.datetime(
                int(t[0:4]),
                int(t[4:6]),
                int(t[6:8]),
                int(t[8:10]),
                int(t[10:12]),
                int(t[12:14]))
        except ValueError:
            # Probably not a session file at all
            return

        if sess_time + exp_time < now:
            os.remove(os.path.join(self.session_file_path, f))

    def clean_up(self):
        """Kick off a background expiry sweep at most once per cycle."""
        global last_cleanup, cleanup_cycle, cleaning_up
        now = datetime.datetime.now()

        if cleaning_up:
            return

        if not last_cleanup or last_cleanup + cleanup_cycle < now:
            if not cleaning_up:
                cleaning_up = True
                try:
                    last_cleanup = now
                    t = threading.Thread(target=self._clean_up)
                    t.start()
                except:
                    # Normally _clean_up should set cleaning_up
                    # to false, but if something goes wrong starting
                    # it...
                    cleaning_up = False
                    raise
+
class _NoDefault(object):
    # Sentinel type used to tell "argument not supplied" apart from an
    # explicit None in make_session_middleware().
    def __repr__(self):
        return '<dynamic default>'
NoDefault = _NoDefault()
+
def make_session_middleware(
    app, global_conf,
    session_expiration=NoDefault,
    expiration=NoDefault,
    cookie_name=NoDefault,
    session_file_path=NoDefault,
    chmod=NoDefault):
    """
    Adds a middleware that handles sessions for your applications.
    The session is a persistent dictionary.  To get this dictionary
    in your application, use ``environ['paste.session.factory']()``
    which returns this persistent dictionary.

    Configuration:

    session_expiration:
        The time each session lives, in minutes.  This controls
        the cookie expiration.  Default 12 hours.

    expiration:
        The time each session lives on disk.  Old sessions are
        culled from disk based on this.  Default 48 hours.

    cookie_name:
        The cookie name used to track the session.  Use different
        names to avoid session clashes.

    session_file_path:
        Sessions are put in this location, default /tmp.

    chmod:
        The octal chmod you want to apply to new sessions (e.g., 660
        to make the sessions group readable/writable)

    Each of these also takes from the global configuration.  cookie_name
    and chmod take from session_cookie_name and session_chmod
    """
    # Fall back to the global configuration for anything not supplied
    # explicitly, then normalize the numeric settings to integers.
    if session_expiration is NoDefault:
        session_expiration = global_conf.get('session_expiration', 60*12)
    if expiration is NoDefault:
        expiration = global_conf.get('expiration', 60*48)
    session_expiration = int(session_expiration)
    expiration = int(expiration)
    if cookie_name is NoDefault:
        cookie_name = global_conf.get('session_cookie_name', '_SID_')
    if session_file_path is NoDefault:
        session_file_path = global_conf.get('session_file_path', '/tmp')
    if chmod is NoDefault:
        chmod = global_conf.get('session_chmod', None)
    return SessionMiddleware(
        app, session_expiration=session_expiration,
        expiration=expiration, cookie_name=cookie_name,
        session_file_path=session_file_path, chmod=chmod)
diff --git a/paste/transaction.py b/paste/transaction.py
new file mode 100644
index 0000000..a283067
--- /dev/null
+++ b/paste/transaction.py
@@ -0,0 +1,120 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# (c) 2005 Clark C. Evans
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+Middleware related to transactions and database connections.
+
+At this time it is very basic; but will eventually sprout all that
+two-phase commit goodness that I don't need.
+
+.. note::
+
+ This is experimental, and will change in the future.
+"""
+from paste.httpexceptions import HTTPException
+from wsgilib import catch_errors
+
class TransactionManagerMiddleware(object):
    """Middleware that installs a transaction Manager in the environ.

    The manager is available as ``environ['paste.transaction_manager']``;
    registered transactions are committed when the wrapped application
    succeeds and rolled back when it raises.
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        environ['paste.transaction_manager'] = manager = Manager()
        # This makes sure nothing else traps unexpected exceptions:
        environ['paste.throw_errors'] = True
        return catch_errors(self.application, environ, start_response,
                            error_callback=manager.error,
                            ok_callback=manager.finish)
+
class Manager(object):
    """Collects transactions for one request and finishes them together."""

    def __init__(self):
        # ``transactions`` holds objects exposing commit()/rollback().
        self.aborted = False
        self.transactions = []

    def abort(self):
        """Mark the request so every transaction gets rolled back."""
        self.aborted = True

    def error(self, exc_info):
        """Error callback: abort and finish all transactions now."""
        self.aborted = True
        self.finish()

    def finish(self):
        """Commit every registered transaction, or roll all back if aborted."""
        rollback = self.aborted
        for transaction in self.transactions:
            if rollback:
                transaction.rollback()
            else:
                transaction.commit()
+
+
class ConnectionFactory(object):
    """
    Provides a callable interface for connecting to ADBAPI databases in
    a WSGI style (using the environment).  More advanced connection
    factories might use the REMOTE_USER and/or other environment
    variables to make the connection returned depend upon the request.
    """
    def __init__(self, module, *args, **kwargs):
        #assert getattr(module,'threadsaftey',0) > 0
        self.module = module
        self.args = args
        self.kwargs = kwargs

        # Prefer the driver's own quoting helper when available; fall
        # back to doubling single quotes otherwise.
        if hasattr(self.module, 'PgQuoteString'):
            self.quote = self.module.PgQuoteString
        else:
            self.quote = lambda s: "'%s'" % s.replace("'", "''")

    def __call__(self, environ=None):
        """Open a connection, annotated with ``module`` and ``quote``."""
        connection = self.module.connect(*self.args, **self.kwargs)
        connection.__dict__['module'] = self.module
        connection.__dict__['quote'] = self.quote
        return connection
+
def BasicTransactionHandler(application, factory):
    """
    Provides a simple mechanism for starting a transaction based on the
    factory; and for either committing or rolling back the transaction
    depending on the result.  It checks for the response's current
    status code either through the latest call to start_response; or
    through a HTTPException's code.  If it is a 100, 200, or 300; the
    transaction is committed; otherwise it is rolled back.
    """
    def basic_transaction(environ, start_response):
        conn = factory(environ)
        environ['paste.connection'] = conn
        # Stack of observed status codes; 500 is the fallback used when
        # an error happens before start_response was ever called.
        should_commit = [500]
        def finalizer(exc_info=None):
            if exc_info:
                if isinstance(exc_info[1], HTTPException):
                    should_commit.append(exc_info[1].code)
            # Decide based on the most recently recorded status code.
            if should_commit.pop() < 400:
                conn.commit()
            else:
                try:
                    conn.rollback()
                except:
                    # TODO: check if rollback has already happened
                    return
            conn.close()
        def basictrans_start_response(status, headers, exc_info = None):
            should_commit.append(int(status.split(" ")[0]))
            return start_response(status, headers, exc_info)
        return catch_errors(application, environ, basictrans_start_response,
                            finalizer, finalizer)
    return basic_transaction
+
__all__ = ['ConnectionFactory', 'BasicTransactionHandler']

# Ad-hoc manual smoke test against a local PostgreSQL database; the
# ``and False`` guard keeps it from ever running automatically.
if '__main__' == __name__ and False:
    from pyPgSQL import PgSQL
    factory = ConnectionFactory(PgSQL, database="testing")
    conn = factory()
    curr = conn.cursor()
    curr.execute("SELECT now(), %s" % conn.quote("B'n\\'gles"))
    (time, bing) = curr.fetchone()
    print bing, time
+
diff --git a/paste/translogger.py b/paste/translogger.py
new file mode 100644
index 0000000..47de2d3
--- /dev/null
+++ b/paste/translogger.py
@@ -0,0 +1,121 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Middleware for logging requests, using Apache combined log format
+"""
+
+import logging
+import time
+import urllib
+
class TransLogger(object):
    """
    This logging middleware will log all requests as they go through.
    They are, by default, sent to a logger named ``'wsgi'`` at the
    INFO level.

    If ``setup_console_handler`` is true, then messages for the named
    logger will be sent to the console.
    """

    # Apache combined log format.
    format = ('%(REMOTE_ADDR)s - %(REMOTE_USER)s [%(time)s] '
              '"%(REQUEST_METHOD)s %(REQUEST_URI)s %(HTTP_VERSION)s" '
              '%(status)s %(bytes)s "%(HTTP_REFERER)s" "%(HTTP_USER_AGENT)s"')

    def __init__(self, application,
                 logger=None,
                 format=None,
                 logging_level=logging.INFO,
                 logger_name='wsgi',
                 setup_console_handler=True,
                 set_logger_level=logging.DEBUG):
        if format is not None:
            self.format = format
        self.application = application
        self.logging_level = logging_level
        self.logger_name = logger_name
        if logger is None:
            self.logger = logging.getLogger(self.logger_name)
            if setup_console_handler:
                console = logging.StreamHandler()
                console.setLevel(logging.DEBUG)
                # We need to control the exact format:
                console.setFormatter(logging.Formatter('%(message)s'))
                self.logger.addHandler(console)
                self.logger.propagate = False
            if set_logger_level is not None:
                self.logger.setLevel(set_logger_level)
        else:
            self.logger = logger

    def __call__(self, environ, start_response):
        start = time.localtime()
        req_uri = urllib.quote(environ.get('SCRIPT_NAME', '')
                               + environ.get('PATH_INFO', ''))
        if environ.get('QUERY_STRING'):
            req_uri += '?'+environ['QUERY_STRING']
        method = environ['REQUEST_METHOD']
        def replacement_start_response(status, headers, exc_info=None):
            # @@: Ideally we would count the bytes going by if no
            # content-length header was provided; but that does add
            # some overhead, so at least for now we'll be lazy.
            bytes = None
            for name, value in headers:
                if name.lower() == 'content-length':
                    bytes = value
            self.write_log(environ, method, req_uri, start, status, bytes)
            # Bug fix: forward exc_info so error handlers can replace an
            # already-started response, as the WSGI spec requires.
            return start_response(status, headers, exc_info)
        return self.application(environ, replacement_start_response)

    def write_log(self, environ, method, req_uri, start, status, bytes):
        """Format one request in combined-log style and emit it."""
        if bytes is None:
            bytes = '-'
        # Render the local UTC offset as +HHMM/-HHMM (whole hours only;
        # floor division keeps the Python 2 integer semantics).
        if time.daylight:
            offset = time.altzone // 60 // 60 * -100
        else:
            offset = time.timezone // 60 // 60 * -100
        if offset >= 0:
            offset = "+%0.4d" % (offset)
        elif offset < 0:
            offset = "%0.4d" % (offset)
        remote_addr = '-'
        # NOTE(review): X-Forwarded-For is client-supplied and trusted
        # blindly here; fine for logging, not for access control.
        if environ.get('HTTP_X_FORWARDED_FOR'):
            remote_addr = environ['HTTP_X_FORWARDED_FOR']
        elif environ.get('REMOTE_ADDR'):
            remote_addr = environ['REMOTE_ADDR']
        d = {
            'REMOTE_ADDR': remote_addr,
            'REMOTE_USER': environ.get('REMOTE_USER') or '-',
            'REQUEST_METHOD': method,
            'REQUEST_URI': req_uri,
            'HTTP_VERSION': environ.get('SERVER_PROTOCOL'),
            'time': time.strftime('%d/%b/%Y:%H:%M:%S ', start) + offset,
            'status': status.split(None, 1)[0],
            'bytes': bytes,
            'HTTP_REFERER': environ.get('HTTP_REFERER', '-'),
            'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'),
        }
        message = self.format % d
        self.logger.log(self.logging_level, message)
+
def make_filter(
    app, global_conf,
    logger_name='wsgi',
    format=None,
    logging_level=logging.INFO,
    setup_console_handler=True,
    set_logger_level=logging.DEBUG):
    # Paste Deploy filter entry point: coerce string configuration
    # values, then wrap ``app`` in a TransLogger.
    from paste.util.converters import asbool
    # NOTE(review): logging._levelNames is a private stdlib attribute
    # (removed in Python 3.4); acceptable for the Python 2 era this
    # code targets, but worth revisiting.
    if isinstance(logging_level, basestring):
        logging_level = logging._levelNames[logging_level]
    if isinstance(set_logger_level, basestring):
        set_logger_level = logging._levelNames[set_logger_level]
    return TransLogger(
        app,
        format=format or None,
        logging_level=logging_level,
        logger_name=logger_name,
        setup_console_handler=asbool(setup_console_handler),
        set_logger_level=set_logger_level)

make_filter.__doc__ = TransLogger.__doc__
diff --git a/paste/url.py b/paste/url.py
new file mode 100644
index 0000000..afc4fca
--- /dev/null
+++ b/paste/url.py
@@ -0,0 +1,475 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+This module implements a class for handling URLs.
+"""
+import urllib
+import cgi
+from paste import request
# Imported lazily from FormEncode:
variabledecode = None  # populated by URLResource.coerce_vars on first use

__all__ = ["URL", "Image"]
+
def html_quote(v):
    """HTML-escape ``str(v)`` (including quotes); None renders as ''."""
    if v is not None:
        return cgi.escape(str(v), 1)
    return ''
+
def url_quote(v):
    """URL-quote ``str(v)``; None renders as the empty string."""
    if v is not None:
        return urllib.quote(str(v))
    return ''
+
# Inverse of url_quote (%xx unescaping); exported for symmetry.
url_unquote = urllib.unquote
+
+def js_repr(v):
+ if v is None:
+ return 'null'
+ elif v is False:
+ return 'false'
+ elif v is True:
+ return 'true'
+ elif isinstance(v, list):
+ return '[%s]' % ', '.join(map(js_repr, v))
+ elif isinstance(v, dict):
+ return '{%s}' % ', '.join(
+ ['%s: %s' % (js_repr(key), js_repr(value))
+ for key, value in v])
+ elif isinstance(v, str):
+ return repr(v)
+ elif isinstance(v, unicode):
+ # @@: how do you do Unicode literals in Javascript?
+ return repr(v.encode('UTF-8'))
+ elif isinstance(v, (float, int)):
+ return repr(v)
+ elif isinstance(v, long):
+ return repr(v).lstrip('L')
+ elif hasattr(v, '__js_repr__'):
+ return v.__js_repr__()
+ else:
+ raise ValueError(
+ "I don't know how to turn %r into a Javascript representation"
+ % v)
+
class URLResource(object):

    """
    This is an abstract superclass for different kinds of URLs
    """

    # Subclasses override this with their rendering defaults (e.g. the
    # HTML tag to emit).
    default_params = {}

    def __init__(self, url, vars=None, attrs=None,
                 params=None):
        # vars: [(name, value)] query pairs; attrs: HTML attributes;
        # params: rendering options layered over default_params.
        self.url = url or '/'
        self.vars = vars or []
        self.attrs = attrs or {}
        self.params = self.default_params.copy()
        self.original_params = params or {}
        if params:
            self.params.update(params)

    #@classmethod
    def from_environ(cls, environ, with_query_string=True,
                     with_path_info=True, script_name=None,
                     path_info=None, querystring=None):
        """Build an instance from a WSGI environ."""
        url = request.construct_url(
            environ, with_query_string=False,
            with_path_info=with_path_info, script_name=script_name,
            path_info=path_info)
        if with_query_string:
            if querystring is None:
                vars = request.parse_querystring(environ)
            else:
                vars = cgi.parse_qsl(
                    querystring,
                    keep_blank_values=True,
                    strict_parsing=False)
        else:
            vars = None
        v = cls(url, vars=vars)
        return v

    from_environ = classmethod(from_environ)

    def __call__(self, *args, **kw):
        """Positional args extend the path; keywords add variables."""
        res = self._add_positional(args)
        res = res._add_vars(kw)
        return res

    def __getitem__(self, item):
        # 'name=value' adds a (URL-unquoted) query variable; anything
        # else is treated as a path segment.
        if '=' in item:
            name, value = item.split('=', 1)
            return self._add_vars({url_unquote(name): url_unquote(value)})
        return self._add_positional((item,))

    def attr(self, **kw):
        """Return a copy with the given HTML attributes added.

        A trailing underscore lets you pass reserved words (class_).
        """
        # Iterate over a snapshot of the keys: we mutate kw while
        # renaming trailing-underscore keys.
        for key in list(kw.keys()):
            if key.endswith('_'):
                kw[key[:-1]] = kw[key]
                del kw[key]
        new_attrs = self.attrs.copy()
        new_attrs.update(kw)
        return self.__class__(self.url, vars=self.vars,
                              attrs=new_attrs,
                              params=self.original_params)

    def param(self, **kw):
        """Return a copy with the given rendering params added."""
        new_params = self.original_params.copy()
        new_params.update(kw)
        return self.__class__(self.url, vars=self.vars,
                              attrs=self.attrs,
                              params=new_params)

    def coerce_vars(self, vars):
        """Normalize a variable dict: strip trailing underscores and
        flatten nested dicts via FormEncode's variable_encode."""
        global variabledecode
        need_variable_encode = False
        # Snapshot the items: keys are renamed/deleted while iterating.
        for key, value in list(vars.items()):
            if isinstance(value, dict):
                need_variable_encode = True
            if key.endswith('_'):
                vars[key[:-1]] = vars[key]
                del vars[key]
        if need_variable_encode:
            if variabledecode is None:
                from formencode import variabledecode
            vars = variabledecode.variable_encode(vars)
        return vars


    def var(self, **kw):
        """Return a copy with the given variables appended."""
        kw = self.coerce_vars(kw)
        new_vars = self.vars + list(kw.items())
        return self.__class__(self.url, vars=new_vars,
                              attrs=self.attrs,
                              params=self.original_params)

    def setvar(self, **kw):
        """
        Like ``.var(...)``, except overwrites keys, where .var simply
        extends the keys.  Setting a variable to None here will
        effectively delete it.
        """
        kw = self.coerce_vars(kw)
        new_vars = []
        for name, values in self.vars:
            if name in kw:
                continue
            new_vars.append((name, values))
        new_vars.extend(kw.items())
        return self.__class__(self.url, vars=new_vars,
                              attrs=self.attrs,
                              params=self.original_params)

    def setvars(self, **kw):
        """
        Creates a copy of this URL, but with all the variables set/reset
        (like .setvar(), except clears past variables at the same time)
        """
        return self.__class__(self.url, vars=list(kw.items()),
                              attrs=self.attrs,
                              params=self.original_params)

    def addpath(self, *paths):
        """Return a copy with the given segments appended to the path."""
        u = self
        for path in paths:
            path = str(path).lstrip('/')
            new_url = u.url
            if not new_url.endswith('/'):
                new_url += '/'
            u = u.__class__(new_url+path, vars=u.vars,
                            attrs=u.attrs,
                            params=u.original_params)
        return u

    __div__ = addpath

    def become(self, OtherClass):
        """Re-wrap this URL as another URLResource subclass."""
        return OtherClass(self.url, vars=self.vars,
                          attrs=self.attrs,
                          params=self.original_params)

    def href__get(self):
        # URL plus query string; None-valued variables are dropped and
        # list values are expanded by urlencode(..., True).
        s = self.url
        if self.vars:
            s += '?'
            vars = []
            for name, val in self.vars:
                if isinstance(val, (list, tuple)):
                    val = [v for v in val if v is not None]
                elif val is None:
                    continue
                vars.append((name, val))
            s += urllib.urlencode(vars, True)
        return s

    href = property(href__get)

    def __repr__(self):
        base = '<%s %s' % (self.__class__.__name__,
                           self.href or "''")
        if self.attrs:
            base += ' attrs(%s)' % (
                ' '.join(['%s="%s"' % (html_quote(n), html_quote(v))
                          for n, v in self.attrs.items()]))
        if self.original_params:
            # Bug fix: show the params here; this previously iterated
            # self.attrs, so the params(...) section was always wrong.
            base += ' params(%s)' % (
                ', '.join(['%s=%r' % (n, v)
                           for n, v in self.original_params.items()]))
        return base + '>'

    def html__get(self):
        # Render '<tag attrs...>content</tag>'; requires the 'tag'
        # param (subclasses provide it via default_params).
        if not self.params.get('tag'):
            raise ValueError(
                "You cannot get the HTML of %r until you set the "
                "'tag' param'" % self)
        content = self._get_content()
        tag = '<%s' % self.params.get('tag')
        attrs = ' '.join([
            '%s="%s"' % (html_quote(n), html_quote(v))
            for n, v in self._html_attrs()])
        if attrs:
            tag += ' ' + attrs
        tag += self._html_extra()
        if content is None:
            return tag + ' />'
        else:
            return '%s>%s</%s>' % (tag, content, self.params.get('tag'))

    html = property(html__get)

    def _html_attrs(self):
        # Hook: subclasses prepend href/src/onclick attributes.
        return self.attrs.items()

    def _html_extra(self):
        return ''

    def _get_content(self):
        """
        Return the content for a tag (for self.html); return None
        for an empty tag (like ``<img />``)
        """
        raise NotImplementedError

    def _add_vars(self, vars):
        raise NotImplementedError

    def _add_positional(self, args):
        raise NotImplementedError
+
class URL(URLResource):

    r"""
    >>> u = URL('http://localhost')
    >>> u
    <URL http://localhost>
    >>> u = u['view']
    >>> str(u)
    'http://localhost/view'
    >>> u['//foo'].param(content='view').html
    '<a href="http://localhost/view/foo">view</a>'
    >>> u.param(confirm='Really?', content='goto').html
    '<a href="http://localhost/view" onclick="return confirm(\'Really?\')">goto</a>'
    >>> u(title='See "it"', content='goto').html
    '<a href="http://localhost/view?title=See+%22it%22">goto</a>'
    >>> u('another', var='fuggetaboutit', content='goto').html
    '<a href="http://localhost/view/another?var=fuggetaboutit">goto</a>'
    >>> u.attr(content='goto').html
    Traceback (most recent call last):
    ....
    ValueError: You must give a content param to <URL http://localhost/view attrs(content="goto")> generate anchor tags
    >>> str(u['foo=bar%20stuff'])
    'http://localhost/view?foo=bar+stuff'
    """

    default_params = {'tag': 'a'}

    def __str__(self):
        return self.href

    def _get_content(self):
        # Anchor text comes from the 'content' param.
        if not self.params.get('content'):
            raise ValueError(
                "You must give a content param to %r generate anchor tags"
                % self)
        return self.params['content']

    def _add_vars(self, vars):
        # 'confirm'/'content' keywords are rendering params, 'target'
        # is an HTML attribute; everything else is a query variable.
        url = self
        for name in ('confirm', 'content'):
            if name in vars:
                url = url.param(**{name: vars.pop(name)})
        if 'target' in vars:
            url = url.attr(target=vars.pop('target'))
        return url.var(**vars)

    def _add_positional(self, args):
        return self.addpath(*args)

    def _html_attrs(self):
        attrs = self.attrs.items()
        attrs.insert(0, ('href', self.href))
        if self.params.get('confirm'):
            # Ask the user before navigating away.
            attrs.append(('onclick', 'return confirm(%s)'
                          % js_repr(self.params['confirm'])))
        return attrs

    def onclick_goto__get(self):
        # Javascript snippet that navigates to this URL.
        return 'location.href=%s; return false' % js_repr(self.href)

    onclick_goto = property(onclick_goto__get)

    def button__get(self):
        return self.become(Button)

    button = property(button__get)

    def js_popup__get(self):
        # View this URL as a JSPopup (opens in a new window).
        return self.become(JSPopup)

    js_popup = property(js_popup__get)
+
class Image(URLResource):

    r"""
    >>> i = Image('/images')
    >>> i = i / '/foo.png'
    >>> i.html
    '<img src="/images/foo.png" />'
    >>> str(i['alt=foo'])
    '<img src="/images/foo.png" alt="foo" />'
    >>> i.href
    '/images/foo.png'
    """

    default_params = {'tag': 'img'}

    def __str__(self):
        # An image stringifies as its tag, not its URL.
        return self.html

    def _get_content(self):
        # <img> is an empty element; render with a self-closing tag.
        return None

    def _add_vars(self, vars):
        # Keyword "variables" on an image become HTML attributes.
        return self.attr(**vars)

    def _add_positional(self, args):
        return self.addpath(*args)

    def _html_attrs(self):
        all_attrs = list(self.attrs.items())
        all_attrs.insert(0, ('src', self.href))
        return all_attrs
+
class Button(URLResource):

    r"""
    >>> u = URL('/')
    >>> u = u / 'delete'
    >>> b = u.button['confirm=Sure?'](id=5, content='del')
    >>> str(b)
    '<button onclick="if (confirm(\'Sure?\')) {location.href=\'/delete?id=5\'}; return false">del</button>'
    """

    default_params = {'tag': 'button'}

    def __str__(self):
        return self.html

    def _get_content(self):
        """Button label: the 'content' param, else the 'value' attribute."""
        if self.params.get('content'):
            return self.params['content']
        if self.attrs.get('value'):
            # Bug fix: this previously returned self.attrs['content'],
            # which raised KeyError whenever only 'value' was supplied.
            return self.attrs['value']
        # @@: Error?
        return None

    def _add_vars(self, vars):
        # 'confirm'/'content' keywords are params; the rest are query vars.
        button = self
        if 'confirm' in vars:
            button = button.param(confirm=vars.pop('confirm'))
        if 'content' in vars:
            button = button.param(content=vars.pop('content'))
        return button.var(**vars)

    def _add_positional(self, args):
        return self.addpath(*args)

    def _html_attrs(self):
        attrs = self.attrs.items()
        onclick = 'location.href=%s' % js_repr(self.href)
        if self.params.get('confirm'):
            onclick = 'if (confirm(%s)) {%s}' % (
                js_repr(self.params['confirm']), onclick)
        onclick += '; return false'
        attrs.insert(0, ('onclick', onclick))
        return attrs
+
class JSPopup(URLResource):

    r"""
    >>> u = URL('/')
    >>> u = u / 'view'
    >>> j = u.js_popup(content='view')
    >>> j.html
    '<a href="/view" onclick="window.open(\'/view\', \'_blank\'); return false" target="_blank">view</a>'
    """

    default_params = {'tag': 'a', 'target': '_blank'}

    def _add_vars(self, vars):
        # Popup-specific keywords become rendering params; anything
        # else is a query variable.
        button = self
        for var in ('width', 'height', 'stripped', 'content'):
            if var in vars:
                button = button.param(**{var: vars.pop(var)})
        return button.var(**vars)

    def _window_args(self):
        """Build the argument list for the javascript window.open() call."""
        # NOTE(review): the 'stripped' shortcut mutates self.params in
        # place here; harmless in practice since rendering happens once.
        p = self.params
        features = []
        if p.get('stripped'):
            p['location'] = p['status'] = p['toolbar'] = '0'
        for param in 'channelmode directories fullscreen location menubar resizable scrollbars status titlebar'.split():
            if param not in p:
                continue
            v = p[param]
            # Normalize truthy/falsy values to the '1'/'0' strings that
            # window.open() feature lists expect.
            if v not in ('yes', 'no', '1', '0'):
                if v:
                    v = '1'
                else:
                    v = '0'
            features.append('%s=%s' % (param, v))
        # Bug fix: this string was iterated character-by-character
        # before (missing .split()), so the size/position params were
        # silently dropped from the feature list.
        for param in 'height left top width'.split():
            if not p.get(param):
                continue
            features.append('%s=%s' % (param, p[param]))
        args = [self.href, p['target']]
        if features:
            args.append(','.join(features))
        return ', '.join(map(js_repr, args))

    def _html_attrs(self):
        attrs = self.attrs.items()
        onclick = ('window.open(%s); return false'
                   % self._window_args())
        attrs.insert(0, ('target', self.params['target']))
        attrs.insert(0, ('onclick', onclick))
        attrs.insert(0, ('href', self.href))
        return attrs

    def _get_content(self):
        if not self.params.get('content'):
            raise ValueError(
                "You must give a content param to %r generate anchor tags"
                % self)
        return self.params['content']

    def _add_positional(self, args):
        return self.addpath(*args)
+
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
diff --git a/paste/urlmap.py b/paste/urlmap.py
new file mode 100644
index 0000000..a636531
--- /dev/null
+++ b/paste/urlmap.py
@@ -0,0 +1,250 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Map URL prefixes to WSGI applications. See ``URLMap``
+"""
+
+from UserDict import DictMixin
+import re
+import os
+import cgi
+from paste import httpexceptions
+
+__all__ = ['URLMap', 'PathProxyURLMap']
+
def urlmap_factory(loader, global_conf, **local_conf):
    """Paste Deploy entry point: build a ``URLMap`` from configuration.

    ``not_found_app`` may come from the local configuration (taking
    precedence) or the global configuration; every other local option is
    a path expression mapped to an application name.
    """
    not_found_app = local_conf.pop(
        'not_found_app', global_conf.get('not_found_app'))
    if not_found_app:
        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
    urlmap = URLMap(not_found_app=not_found_app)
    for expression, app_name in local_conf.items():
        address = parse_path_expression(expression)
        urlmap[address] = loader.get_app(app_name, global_conf=global_conf)
    return urlmap
+
def parse_path_expression(path):
    """
    Parses a path expression like 'domain foobar.com port 20 /' or
    just '/foobar' for a path alone. Returns as an address that
    URLMap likes.
    """
    tokens = path.split()
    domain = None
    port = None
    path = None
    while tokens:
        token = tokens[0]
        if token == 'domain':
            tokens.pop(0)
            if not tokens:
                raise ValueError("'domain' must be followed with a domain name")
            if domain:
                raise ValueError("'domain' given twice")
            domain = tokens.pop(0)
        elif token == 'port':
            tokens.pop(0)
            if not tokens:
                raise ValueError("'port' must be followed with a port number")
            if port:
                raise ValueError("'port' given twice")
            port = tokens.pop(0)
        else:
            # Anything that isn't a keyword is the (single) path part.
            if path:
                raise ValueError("more than one path given (have %r, got %r)"
                                 % (path, token))
            path = tokens.pop(0)
    address = ''
    if domain:
        address = 'http://%s' % domain
    if port:
        if not domain:
            raise ValueError("If you give a port, you must also give a domain")
        address += ':' + port
    if path:
        if address:
            address += '/'
        address += path
    return address
+
class URLMap(DictMixin):

    """
    URLMap instances are dictionary-like object that dispatch to one
    of several applications based on the URL.

    The dictionary keys are URLs to match (like
    ``PATH_INFO.startswith(url)``), and the values are applications to
    dispatch to. URLs are matched most-specific-first, i.e., longest
    URL first. The ``SCRIPT_NAME`` and ``PATH_INFO`` environmental
    variables are adjusted to indicate the new context.

    URLs can also include domains, like ``http://blah.com/foo``, or as
    tuples ``('blah.com', '/foo')``. This will match domain names; without
    the ``http://domain`` or with a domain of ``None`` any domain will be
    matched (so long as no other explicit domain matches).
    """

    def __init__(self, not_found_app=None):
        # applications is a list of ((domain, url), app) pairs, kept
        # sorted longest-URL-first by sort_apps().
        self.applications = []
        if not not_found_app:
            not_found_app = self.not_found_app
        self.not_found_application = not_found_app

    # Collapses runs of slashes in a path.
    norm_url_re = re.compile('//+')
    # Recognizes absolute URLs with an explicit scheme.
    domain_url_re = re.compile('^(http|https)://')

    def not_found_app(self, environ, start_response):
        """Default 404 application; lists the configured mappings."""
        mapper = environ.get('paste.urlmap_object')
        if mapper:
            matches = [p for p, a in mapper.applications]
            extra = 'defined apps: %s' % (
                ',\n '.join(map(repr, matches)))
        else:
            extra = ''
        extra += '\nSCRIPT_NAME: %r' % environ.get('SCRIPT_NAME')
        extra += '\nPATH_INFO: %r' % environ.get('PATH_INFO')
        extra += '\nHTTP_HOST: %r' % environ.get('HTTP_HOST')
        app = httpexceptions.HTTPNotFound(
            environ['PATH_INFO'],
            comment=cgi.escape(extra)).wsgi_application
        return app(environ, start_response)

    def normalize_url(self, url, trim=True):
        """Normalize `url` to a ``(domain, path)`` tuple.

        Accepts a ``(domain, path)`` tuple/list, an absolute http(s)
        URL, or a bare path. Duplicate slashes are collapsed and,
        unless `trim` is false, trailing slashes are stripped.
        """
        if isinstance(url, (list, tuple)):
            domain = url[0]
            url = self.normalize_url(url[1])[1]
            return domain, url
        assert (not url or url.startswith('/')
                or self.domain_url_re.search(url)), (
            "URL fragments must start with / or http:// (you gave %r)" % url)
        match = self.domain_url_re.search(url)
        if match:
            # Split the domain off an absolute URL.
            url = url[match.end():]
            if '/' in url:
                domain, url = url.split('/', 1)
                url = '/' + url
            else:
                domain, url = url, ''
        else:
            domain = None
        url = self.norm_url_re.sub('/', url)
        if trim:
            url = url.rstrip('/')
        return domain, url

    def sort_apps(self):
        """
        Make sure applications are sorted with longest URLs first
        """
        def key(app_desc):
            (domain, url), app = app_desc
            if not domain:
                # Make sure empty domains sort last:
                return '\xff', -len(url)
            else:
                return domain, -len(url)
        apps = [(key(desc), desc) for desc in self.applications]
        apps.sort()
        self.applications = [desc for (sortable, desc) in apps]

    def __setitem__(self, url, app):
        # Setting an app to None removes any existing mapping.
        if app is None:
            try:
                del self[url]
            except KeyError:
                pass
            return
        dom_url = self.normalize_url(url)
        if dom_url in self:
            del self[dom_url]
        self.applications.append((dom_url, app))
        self.sort_apps()

    def __getitem__(self, url):
        dom_url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == dom_url:
                return app
        # Report the normalized (domain, path) pair: `url` itself may be
        # a plain string, so indexing it directly (as this message
        # previously did) would report single characters.
        raise KeyError(
            "No application with the url %r (domain: %r; existing: %s)"
            % (dom_url[1], dom_url[0] or '*', self.applications))

    def __delitem__(self, url):
        url = self.normalize_url(url)
        for app_url, app in self.applications:
            if app_url == url:
                self.applications.remove((app_url, app))
                break
        else:
            raise KeyError(
                "No application with the url %r" % (url,))

    def keys(self):
        # DictMixin derives the rest of the mapping API from this.
        return [app_url for app_url, app in self.applications]

    def __call__(self, environ, start_response):
        """Dispatch to the longest-prefix-matching application."""
        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            # Infer the default port from the scheme.
            if environ['wsgi.url_scheme'] == 'http':
                port = '80'
            else:
                port = '443'
        path_info = environ.get('PATH_INFO')
        path_info = self.normalize_url(path_info, False)[1]
        for (domain, app_url), app in self.applications:
            if domain and domain != host and domain != host+':'+port:
                continue
            if (path_info == app_url
                or path_info.startswith(app_url + '/')):
                # Shift the matched prefix from PATH_INFO to SCRIPT_NAME.
                environ['SCRIPT_NAME'] += app_url
                environ['PATH_INFO'] = path_info[len(app_url):]
                return app(environ, start_response)
        environ['paste.urlmap_object'] = self
        return self.not_found_application(environ, start_response)
+
+
class PathProxyURLMap(object):

    """
    This is a wrapper for URLMap that catches any strings that
    are passed in as applications; these strings are treated as
    filenames (relative to `base_path`) and are passed to the
    callable `builder`, which will return an application.

    This is intended for cases when configuration files can be
    treated as applications.

    `base_paste_url` is the URL under which all applications added through
    this wrapper must go. Use ``""`` if you want this to not
    change incoming URLs.
    """

    def __init__(self, map, base_paste_url, base_path, builder):
        # `map` is the underlying URLMap; any attribute not handled here
        # is delegated to it (see __getattr__).
        self.map = map
        self.base_paste_url = self.map.normalize_url(base_paste_url)
        self.base_path = base_path
        self.builder = builder

    def __setitem__(self, url, app):
        # Strings are treated as filenames relative to base_path and
        # turned into applications by the builder callable.
        if isinstance(app, (str, unicode)):
            app_fn = os.path.join(self.base_path, app)
            app = self.builder(app_fn)
        url = self.map.normalize_url(url)
        # @@: This means http://foo.com/bar will potentially
        # match foo.com, but /base_paste_url/bar, which is unintuitive
        url = (url[0] or self.base_paste_url[0],
               self.base_paste_url[1] + url[1])
        self.map[url] = app

    def __getattr__(self, attr):
        # Everything else is delegated to the wrapped URLMap.
        return getattr(self.map, attr)

    # This is really the only settable attribute
    def not_found_application__get(self):
        return self.map.not_found_application
    def not_found_application__set(self, value):
        self.map.not_found_application = value
    not_found_application = property(not_found_application__get,
                                     not_found_application__set)
diff --git a/paste/urlparser.py b/paste/urlparser.py
new file mode 100644
index 0000000..a7469e3
--- /dev/null
+++ b/paste/urlparser.py
@@ -0,0 +1,638 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+WSGI applications that parse the URL and dispatch to on-disk resources
+"""
+
+import os
+import sys
+import imp
+import mimetypes
+try:
+ import pkg_resources
+except ImportError:
+ pkg_resources = None
+from paste import request
+from paste import fileapp
+from paste.util import import_string
+from paste import httpexceptions
+from httpheaders import ETAG
+from paste.util import converters
+
class NoDefault(object):
    # Sentinel used to distinguish "argument not supplied" from an
    # explicit None/empty value in URLParser's keyword arguments.
    pass
+
+__all__ = ['URLParser', 'StaticURLParser', 'PkgResourcesParser']
+
class URLParser(object):

    """
    WSGI middleware

    Application dispatching, based on URL. An instance of `URLParser` is
    an application that loads and delegates to other applications. It
    looks for files in its directory that match the first part of
    PATH_INFO; these may have an extension, but are not required to have
    one, in which case the available files are searched to find the
    appropriate file. If it is ambiguous, a 404 is returned and an error
    logged.

    By default there is a constructor for .py files that loads the module,
    and looks for an attribute ``application``, which is a ready
    application object, or an attribute that matches the module name,
    which is a factory for building applications, and is called with no
    arguments.

    URLParser will also look in __init__.py for special overrides.
    These overrides are:

    ``urlparser_hook(environ)``
        This can modify the environment. Its return value is ignored,
        and it cannot be used to change the response in any way. You
        *can* use this, for example, to manipulate SCRIPT_NAME/PATH_INFO
        (try to keep them consistent with the original URL -- but
        consuming PATH_INFO and moving that to SCRIPT_NAME is ok).

    ``urlparser_wrap(environ, start_response, app)``:
        After URLParser finds the application, it calls this function
        (if present). If this function doesn't call
        ``app(environ, start_response)`` then the application won't be
        called at all! This can be used to allocate resources (with
        ``try:finally:``) or otherwise filter the output of the
        application.

    ``not_found_hook(environ, start_response)``:
        If no file can be found (*in this directory*) to match the
        request, then this WSGI application will be called. You can
        use this to change the URL and pass the request back to
        URLParser again, or on to some other application. This
        doesn't catch all ``404 Not Found`` responses, just missing
        files.

    ``application(environ, start_response)``:
        This basically overrides URLParser completely, and the given
        application is used for all requests. ``urlparser_wrap`` and
        ``urlparser_hook`` are still called, but the filesystem isn't
        searched in any way.
    """

    # Class-level cache of parsers keyed by (directory, base_python_name);
    # shared by all instances (see get_parser).
    parsers_by_directory = {}

    # This is lazily initialized
    init_module = NoDefault

    # Constructors registered for all URLParser instances via
    # register_constructor(); per-instance copies may extend this.
    global_constructors = {}

    def __init__(self, global_conf,
                 directory, base_python_name,
                 index_names=NoDefault,
                 hide_extensions=NoDefault,
                 ignore_extensions=NoDefault,
                 constructors=None,
                 **constructor_conf):
        """
        Create a URLParser object that looks at `directory`.
        `base_python_name` is the package that this directory
        represents, thus any Python modules in this directory will
        be given names under this package.
        """
        if global_conf:
            import warnings
            warnings.warn(
                'The global_conf argument to URLParser is deprecated; '
                'either pass in None or {}, or use make_url_parser',
                DeprecationWarning)
        else:
            global_conf = {}
        if os.path.sep != '/':
            # Normalize Windows-style separators to URL-style slashes.
            directory = directory.replace(os.path.sep, '/')
        self.directory = directory
        self.base_python_name = base_python_name
        # This logic here should be deprecated since it is in
        # make_url_parser
        if index_names is NoDefault:
            index_names = global_conf.get(
                'index_names', ('index', 'Index', 'main', 'Main'))
        self.index_names = converters.aslist(index_names)
        if hide_extensions is NoDefault:
            hide_extensions = global_conf.get(
                'hide_extensions', ('.pyc', '.bak', '.py~', '.pyo'))
        self.hide_extensions = converters.aslist(hide_extensions)
        if ignore_extensions is NoDefault:
            ignore_extensions = global_conf.get(
                'ignore_extensions', ())
        self.ignore_extensions = converters.aslist(ignore_extensions)
        self.constructors = self.global_constructors.copy()
        if constructors:
            self.constructors.update(constructors)
        # @@: Should we also check the global options for constructors?
        for name, value in constructor_conf.items():
            if not name.startswith('constructor '):
                raise ValueError(
                    "Only extra configuration keys allowed are "
                    "'constructor .ext = import_expr'; you gave %r "
                    "(=%r)" % (name, value))
            ext = name[len('constructor '):].strip()
            if isinstance(value, (str, unicode)):
                # Dotted-name strings are resolved to the actual callable.
                value = import_string.eval_import(value)
            self.constructors[ext] = value

    def __call__(self, environ, start_response):
        environ['paste.urlparser.base_python_name'] = self.base_python_name
        if self.init_module is NoDefault:
            # __init__.py overrides are loaded lazily on first request.
            self.init_module = self.find_init_module(environ)
        path_info = environ.get('PATH_INFO', '')
        if not path_info:
            return self.add_slash(environ, start_response)
        if (self.init_module
            and getattr(self.init_module, 'urlparser_hook', None)):
            self.init_module.urlparser_hook(environ)
        orig_path_info = environ['PATH_INFO']
        orig_script_name = environ['SCRIPT_NAME']
        application, filename = self.find_application(environ)
        if not application:
            if (self.init_module
                and getattr(self.init_module, 'not_found_hook', None)
                and environ.get('paste.urlparser.not_found_parser') is not self):
                not_found_hook = self.init_module.not_found_hook
                # Mark this parser so the hook can't recurse back into
                # us endlessly; restore the pre-dispatch URL first.
                environ['paste.urlparser.not_found_parser'] = self
                environ['PATH_INFO'] = orig_path_info
                environ['SCRIPT_NAME'] = orig_script_name
                return not_found_hook(environ, start_response)
            if filename is None:
                name, rest_of_path = request.path_info_split(environ['PATH_INFO'])
                if not name:
                    name = 'one of %s' % ', '.join(
                        self.index_names or
                        ['(no index_names defined)'])

                return self.not_found(
                    environ, start_response,
                    'Tried to load %s from directory %s'
                    % (name, self.directory))
            else:
                # A file was found but no constructor produced an app.
                environ['wsgi.errors'].write(
                    'Found resource %s, but could not construct application\n'
                    % filename)
                return self.not_found(
                    environ, start_response,
                    'Tried to load %s from directory %s'
                    % (filename, self.directory))
        if (self.init_module
            and getattr(self.init_module, 'urlparser_wrap', None)):
            return self.init_module.urlparser_wrap(
                environ, start_response, application)
        else:
            return application(environ, start_response)

    def find_application(self, environ):
        # __init__.py may define `application`, which overrides filesystem
        # dispatch entirely (only once per SCRIPT_NAME, so the override
        # can itself delegate back into this parser).
        if (self.init_module
            and getattr(self.init_module, 'application', None)
            and not environ.get('paste.urlparser.init_application') == environ['SCRIPT_NAME']):
            environ['paste.urlparser.init_application'] = environ['SCRIPT_NAME']
            return self.init_module.application, None
        name, rest_of_path = request.path_info_split(environ['PATH_INFO'])
        environ['PATH_INFO'] = rest_of_path
        if name is not None:
            environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + '/' + name
        if not name:
            # Empty segment: try the configured index names in order.
            names = self.index_names
            for index_name in names:
                filename = self.find_file(environ, index_name)
                if filename:
                    break
            else:
                # None of the index files found
                filename = None
        else:
            filename = self.find_file(environ, name)
        if filename is None:
            return None, filename
        else:
            return self.get_application(environ, filename), filename

    def not_found(self, environ, start_response, debug_message=None):
        # Render a 404 through paste.httpexceptions.
        exc = httpexceptions.HTTPNotFound(
            'The resource at %s could not be found'
            % request.construct_url(environ),
            comment=debug_message)
        return exc.wsgi_application(environ, start_response)

    def add_slash(self, environ, start_response):
        """
        This happens when you try to get to a directory
        without a trailing /
        """
        url = request.construct_url(environ, with_query_string=False)
        url += '/'
        if environ.get('QUERY_STRING'):
            url += '?' + environ['QUERY_STRING']
        exc = httpexceptions.HTTPMovedPermanently(
            'The resource has moved to %s - you should be redirected '
            'automatically.' % url,
            headers=[('location', url)])
        return exc.wsgi_application(environ, start_response)

    def find_file(self, environ, base_filename):
        """Locate the file in self.directory matching `base_filename`.

        An exact filename match always counts; otherwise any file whose
        base name (extension stripped) matches is a candidate, unless its
        extension is hidden or ignored. Returns None when nothing (or
        something ambiguous) matches.
        """
        possible = []
        """Cache a few values to reduce function call overhead"""
        for filename in os.listdir(self.directory):
            base, ext = os.path.splitext(filename)
            full_filename = os.path.join(self.directory, filename)
            if (ext in self.hide_extensions
                or not base):
                continue
            if filename == base_filename:
                possible.append(full_filename)
                continue
            if ext in self.ignore_extensions:
                continue
            if base == base_filename:
                possible.append(full_filename)
        if not possible:
            #environ['wsgi.errors'].write(
            #    'No file found matching %r in %s\n'
            #    % (base_filename, self.directory))
            return None
        if len(possible) > 1:
            # If there is an exact match, this isn't 'ambiguous'
            # per se; it might mean foo.gif and foo.gif.back for
            # instance
            # NOTE(review): full_filename here is whatever the *last*
            # directory entry was, not necessarily the exact match --
            # verify this is the intended "exact match" test.
            if full_filename in possible:
                return full_filename
            else:
                environ['wsgi.errors'].write(
                    'Ambiguous URL: %s; matches files %s\n'
                    % (request.construct_url(environ),
                       ', '.join(possible)))
                return None
        return possible[0]

    def get_application(self, environ, filename):
        # Pick a constructor by extension ('dir' for directories,
        # '*' as the catch-all fallback).
        if os.path.isdir(filename):
            t = 'dir'
        else:
            t = os.path.splitext(filename)[1]
        constructor = self.constructors.get(t, self.constructors.get('*'))
        if constructor is None:
            #environ['wsgi.errors'].write(
            #    'No constructor found for %s\n' % t)
            return constructor
        app = constructor(self, environ, filename)
        if app is None:
            #environ['wsgi.errors'].write(
            #    'Constructor %s return None for %s\n' %
            #    (constructor, filename))
            pass
        return app

    def register_constructor(cls, extension, constructor):
        """
        Register a function as a constructor. Registered constructors
        apply to all instances of `URLParser`.

        The extension should have a leading ``.``, or the special
        extensions ``dir`` (for directories) and ``*`` (a catch-all).

        `constructor` must be a callable that takes two arguments:
        ``environ`` and ``filename``, and returns a WSGI application.
        """
        d = cls.global_constructors
        assert not d.has_key(extension), (
            "A constructor already exists for the extension %r (%r) "
            "when attemption to register constructor %r"
            % (extension, d[extension], constructor))
        d[extension] = constructor
    register_constructor = classmethod(register_constructor)

    def get_parser(self, directory, base_python_name):
        """
        Get a parser for the given directory, or create one if
        necessary. This way parsers can be cached and reused.

        # @@: settings are inherited from the first caller
        """
        try:
            return self.parsers_by_directory[(directory, base_python_name)]
        except KeyError:
            parser = self.__class__(
                {},
                directory, base_python_name,
                index_names=self.index_names,
                hide_extensions=self.hide_extensions,
                ignore_extensions=self.ignore_extensions,
                constructors=self.constructors)
            self.parsers_by_directory[(directory, base_python_name)] = parser
            return parser

    def find_init_module(self, environ):
        # Load <directory>/__init__.py if present (for hook overrides).
        filename = os.path.join(self.directory, '__init__.py')
        if not os.path.exists(filename):
            return None
        return load_module(environ, filename)

    def __repr__(self):
        return '<%s directory=%r; module=%s at %s>' % (
            self.__class__.__name__,
            self.directory,
            self.base_python_name,
            hex(abs(id(self))))
+
def make_directory(parser, environ, filename):
    """Constructor for directories: delegate to a (cached) child parser
    rooted at the subdirectory, extending the package name."""
    parent_pkg = environ['paste.urlparser.base_python_name']
    child_pkg = os.path.basename(filename)
    if parent_pkg:
        child_pkg = parent_pkg + "." + child_pkg
    return parser.get_parser(filename, child_pkg)

URLParser.register_constructor('dir', make_directory)
+
def make_unknown(parser, environ, filename):
    # Catch-all constructor: serve any unrecognized file as static content.
    return fileapp.FileApp(filename)

URLParser.register_constructor('*', make_unknown)
+
def load_module(environ, filename):
    """Import the Python module for `filename`, named under the current
    base package (if any), and return the module object."""
    base_pkg = environ['paste.urlparser.base_python_name']
    mod_name = os.path.splitext(os.path.basename(filename))[0]
    if base_pkg:
        mod_name = base_pkg + '.' + mod_name
    return load_module_from_name(environ, filename, mod_name,
                                 environ['wsgi.errors'])
+
def load_module_from_name(environ, filename, module_name, errors):
    """Import and return the module `module_name` located at `filename`.

    Returns the cached module from sys.modules when available. Creates a
    stub ``__init__.py`` in the file's directory when missing (so the
    directory is importable as a package), loads parent packages
    recursively, then imports the target via the ``imp`` machinery.
    `errors` is a file-like object (wsgi.errors) used for diagnostics;
    returns None when the package stub cannot be written.
    """
    if sys.modules.has_key(module_name):
        return sys.modules[module_name]
    init_filename = os.path.join(os.path.dirname(filename), '__init__.py')
    if not os.path.exists(init_filename):
        try:
            f = open(init_filename, 'w')
        except (OSError, IOError), e:
            errors.write(
                'Cannot write __init__.py file into directory %s (%s)\n'
                % (os.path.dirname(filename), e))
            return None
        f.write('#\n')
        f.close()
    fp = None
    if sys.modules.has_key(module_name):
        return sys.modules[module_name]
    if '.' in module_name:
        # Import parent packages first so the dotted name resolves.
        parent_name = '.'.join(module_name.split('.')[:-1])
        base_name = module_name.split('.')[-1]
        parent = load_module_from_name(environ, os.path.dirname(filename),
                                       parent_name, errors)
    else:
        base_name = module_name
    fp = None
    try:
        fp, pathname, stuff = imp.find_module(
            base_name, [os.path.dirname(filename)])
        module = imp.load_module(module_name, fp, pathname, stuff)
    finally:
        # imp.find_module may return an open file handle; always close it.
        if fp is not None:
            fp.close()
    return module
+
def make_py(parser, environ, filename):
    """Constructor for ``.py`` files: import the module and find a WSGI app.

    Looks for, in order: a module-level ``application`` object (or its
    ``wsgi_application`` attribute), then an attribute named after the
    module itself -- either an object exposing ``wsgi_application`` or a
    factory called with no arguments (deprecated behavior). Returns None
    (after logging to wsgi.errors) when nothing suitable is found.
    """
    module = load_module(environ, filename)
    if not module:
        return None
    if hasattr(module, 'application') and module.application:
        return getattr(module.application, 'wsgi_application', module.application)
    base_name = module.__name__.split('.')[-1]
    if hasattr(module, base_name):
        obj = getattr(module, base_name)
        if hasattr(obj, 'wsgi_application'):
            return obj.wsgi_application
        else:
            # @@: Old behavior; should probably be deprecated eventually:
            return getattr(module, base_name)()
    # Fixed typo in the diagnostic ("Cound" -> "Could").
    environ['wsgi.errors'].write(
        "Could not find application or %s in %s\n"
        % (base_name, module))
    return None

URLParser.register_constructor('.py', make_py)
+
class StaticURLParser(object):
    """
    Like ``URLParser`` but only serves static files.

    ``cache_max_age``:
      integer specifies Cache-Control max_age in seconds
    """
    # @@: Should URLParser subclass from this?

    def __init__(self, directory, root_directory=None,
                 cache_max_age=None):
        self.directory = self.normpath(directory)
        # root_directory bounds every lookup; requests must not escape it.
        self.root_directory = self.normpath(root_directory or directory)
        self.cache_max_age = cache_max_age

    def normpath(path):
        # Canonical absolute, case-normalized form used for containment
        # checks below.
        return os.path.normcase(os.path.abspath(path))
    normpath = staticmethod(normpath)

    def __call__(self, environ, start_response):
        path_info = environ.get('PATH_INFO', '')
        if not path_info:
            return self.add_slash(environ, start_response)
        if path_info == '/':
            # @@: This should obviously be configurable
            filename = 'index.html'
        else:
            filename = request.path_info_pop(environ)
        full = self.normpath(os.path.join(self.directory, filename))
        # Containment check: `full` must be the root itself or live
        # beneath it. Comparing against root + os.path.sep prevents a
        # sibling path that merely shares the root as a string prefix
        # (e.g. /var/www-secret vs. root /var/www) from being served,
        # which the plain startswith() test previously allowed.
        if (full != self.root_directory
            and not full.startswith(self.root_directory + os.path.sep)):
            # Out of bounds
            return self.not_found(environ, start_response)
        if not os.path.exists(full):
            return self.not_found(environ, start_response)
        if os.path.isdir(full):
            # @@: Cache?
            return self.__class__(full, root_directory=self.root_directory,
                                  cache_max_age=self.cache_max_age)(environ,
                                                                    start_response)
        if environ.get('PATH_INFO') and environ.get('PATH_INFO') != '/':
            return self.error_extra_path(environ, start_response)
        if_none_match = environ.get('HTTP_IF_NONE_MATCH')
        if if_none_match:
            # Poor-man's ETag: the file's mtime rendered as a string.
            mytime = os.stat(full).st_mtime
            if str(mytime) == if_none_match:
                headers = []
                ## FIXME: probably should be
                ## ETAG.update(headers, '"%s"' % mytime)
                ETAG.update(headers, mytime)
                start_response('304 Not Modified', headers)
                return [''] # empty body

        fa = self.make_app(full)
        if self.cache_max_age:
            fa.cache_control(max_age=self.cache_max_age)
        return fa(environ, start_response)

    def make_app(self, filename):
        # Hook point: subclasses may substitute another file-serving app.
        return fileapp.FileApp(filename)

    def add_slash(self, environ, start_response):
        """
        This happens when you try to get to a directory
        without a trailing /
        """
        url = request.construct_url(environ, with_query_string=False)
        url += '/'
        if environ.get('QUERY_STRING'):
            url += '?' + environ['QUERY_STRING']
        exc = httpexceptions.HTTPMovedPermanently(
            'The resource has moved to %s - you should be redirected '
            'automatically.' % url,
            headers=[('location', url)])
        return exc.wsgi_application(environ, start_response)

    def not_found(self, environ, start_response, debug_message=None):
        exc = httpexceptions.HTTPNotFound(
            'The resource at %s could not be found'
            % request.construct_url(environ),
            comment='SCRIPT_NAME=%r; PATH_INFO=%r; looking in %r; debug: %s'
            % (environ.get('SCRIPT_NAME'), environ.get('PATH_INFO'),
               self.directory, debug_message or '(none)'))
        return exc.wsgi_application(environ, start_response)

    def error_extra_path(self, environ, start_response):
        # A file matched but more path segments were left over.
        exc = httpexceptions.HTTPNotFound(
            'The trailing path %r is not allowed' % environ['PATH_INFO'])
        return exc.wsgi_application(environ, start_response)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.directory)
+
def make_static(global_conf, document_root, cache_max_age=None):
    """
    Return a WSGI application that serves a directory (configured
    with document_root)

    cache_max_age - integer specifies CACHE_CONTROL max_age in seconds
    """
    if cache_max_age is None:
        max_age = None
    else:
        # Configuration values arrive as strings; coerce to int.
        max_age = int(cache_max_age)
    return StaticURLParser(document_root, cache_max_age=max_age)
+
class PkgResourcesParser(StaticURLParser):

    """Like StaticURLParser, but serves resources out of a Python egg
    via ``pkg_resources`` instead of the filesystem."""

    def __init__(self, egg_or_spec, resource_name, manager=None, root_resource=None):
        if pkg_resources is None:
            # pkg_resources is an optional import at module top.
            raise NotImplementedError("This class requires pkg_resources.")
        if isinstance(egg_or_spec, (str, unicode)):
            # A spec string (e.g. 'MyPackage==1.0') is resolved to a
            # distribution object.
            self.egg = pkg_resources.get_distribution(egg_or_spec)
        else:
            self.egg = egg_or_spec
        self.resource_name = resource_name
        if manager is None:
            manager = pkg_resources.ResourceManager()
        self.manager = manager
        if root_resource is None:
            root_resource = resource_name
        # All lookups are confined beneath this resource path.
        self.root_resource = os.path.normpath(root_resource)

    def __repr__(self):
        return '<%s for %s:%r>' % (
            self.__class__.__name__,
            self.egg.project_name,
            self.resource_name)

    def __call__(self, environ, start_response):
        path_info = environ.get('PATH_INFO', '')
        if not path_info:
            return self.add_slash(environ, start_response)
        if path_info == '/':
            # @@: This should obviously be configurable
            filename = 'index.html'
        else:
            filename = request.path_info_pop(environ)
        resource = os.path.normcase(os.path.normpath(
            self.resource_name + '/' + filename))
        if self.root_resource is not None and not resource.startswith(self.root_resource):
            # Out of bounds
            return self.not_found(environ, start_response)
        if not self.egg.has_resource(resource):
            return self.not_found(environ, start_response)
        if self.egg.resource_isdir(resource):
            # @@: Cache?
            child_root = self.root_resource is not None and self.root_resource or \
                self.resource_name
            return self.__class__(self.egg, resource, self.manager,
                                  root_resource=child_root)(environ, start_response)
        if environ.get('PATH_INFO') and environ.get('PATH_INFO') != '/':
            return self.error_extra_path(environ, start_response)

        type, encoding = mimetypes.guess_type(resource)
        if not type:
            type = 'application/octet-stream'
        # @@: I don't know what to do with the encoding.
        try:
            file = self.egg.get_resource_stream(self.manager, resource)
        except (IOError, OSError), e:
            exc = httpexceptions.HTTPForbidden(
                'You are not permitted to view this file (%s)' % e)
            return exc.wsgi_application(environ, start_response)
        start_response('200 OK',
                       [('content-type', type)])
        return fileapp._FileIter(file)

    def not_found(self, environ, start_response, debug_message=None):
        exc = httpexceptions.HTTPNotFound(
            'The resource at %s could not be found'
            % request.construct_url(environ),
            comment='SCRIPT_NAME=%r; PATH_INFO=%r; looking in egg:%s#%r; debug: %s'
            % (environ.get('SCRIPT_NAME'), environ.get('PATH_INFO'),
               self.egg, self.resource_name, debug_message or '(none)'))
        return exc.wsgi_application(environ, start_response)
+
def make_pkg_resources(global_conf, egg, resource_name=''):
    """
    A static file parser that loads data from an egg using
    ``pkg_resources``. Takes a configuration value ``egg``, which is
    an egg spec, and a base ``resource_name`` (default empty string)
    which is the path in the egg that this starts at.
    """
    if pkg_resources is None:
        # pkg_resources is an optional import at module top.
        raise NotImplementedError("This function requires pkg_resources.")
    return PkgResourcesParser(egg, resource_name)
+
def make_url_parser(global_conf, directory, base_python_name,
                    index_names=None, hide_extensions=None,
                    ignore_extensions=None,
                    **constructor_conf):
    """
    Create a URLParser application that looks in ``directory``, which
    should be the directory for the Python package named in
    ``base_python_name``. ``index_names`` are used when viewing the
    directory (like ``'index'`` for ``'index.html'``).
    ``hide_extensions`` are extensions that are not viewable (like
    ``'.pyc'``) and ``ignore_extensions`` are viewable but only if an
    explicit extension is given.
    """
    if index_names is None:
        index_names = global_conf.get(
            'index_names', ('index', 'Index', 'main', 'Main'))
    index_names = converters.aslist(index_names)

    if hide_extensions is None:
        # Default kept consistent with URLParser.__init__: extensions
        # must carry the leading dot (and include .pyo), since find_file
        # compares them against os.path.splitext() results, which always
        # include the dot -- the old 'bak'/'py~' entries never matched.
        hide_extensions = global_conf.get(
            'hide_extensions', ('.pyc', '.bak', '.py~', '.pyo'))
    hide_extensions = converters.aslist(hide_extensions)

    if ignore_extensions is None:
        ignore_extensions = global_conf.get(
            'ignore_extensions', ())
    ignore_extensions = converters.aslist(ignore_extensions)
    # There's no real way to set constructors currently...

    return URLParser({}, directory, base_python_name,
                     index_names=index_names,
                     hide_extensions=hide_extensions,
                     ignore_extensions=ignore_extensions,
                     **constructor_conf)
diff --git a/paste/util/PySourceColor.py b/paste/util/PySourceColor.py
new file mode 100644
index 0000000..65406ec
--- /dev/null
+++ b/paste/util/PySourceColor.py
@@ -0,0 +1,2103 @@
+# -*- coding: Latin-1 -*-
+"""
+PySourceColor: color Python source code
+"""
+
+"""
+ PySourceColor.py
+
+----------------------------------------------------------------------------
+
+ A python source to colorized html/css/xhtml converter.
+ Hacked by M.E.Farmer Jr. 2004, 2005
+ Python license
+
+----------------------------------------------------------------------------
+
+ - HTML markup does not create w3c valid html, but it works on every
+ browser i've tried so far.(I.E.,Mozilla/Firefox,Opera,Konqueror,wxHTML).
+ - CSS markup is w3c validated html 4.01 strict,
+ but will not render correctly on all browsers.
+ - XHTML markup is w3c validated xhtml 1.0 strict,
+ like html 4.01, will not render correctly on all browsers.
+
+----------------------------------------------------------------------------
+
+Features:
+
+ -Three types of markup:
+ html (default)
+ css/html 4.01 strict
+ xhtml 1.0 strict
+
+ -Can tokenize and colorize:
+ 12 types of strings
+ 2 comment types
+ numbers
+ operators
+ brackets
+ math operators
+ class / name
+ def / name
+ decorator / name
+ keywords
+ arguments class/def/decorator
+ linenumbers
+ names
+ text
+
+ -Eight colorschemes built-in:
+ null
+ mono
+ lite (default)
+ dark
+ dark2
+ idle
+ viewcvs
+ pythonwin
+
+ -Header and footer
+ set to '' for builtin header / footer.
+ give path to a file containing the html
+ you want added as header or footer.
+
+ -Arbitrary text and html
+ html markup converts all to raw (TEXT token)
+ #@# for raw -> send raw text.
+ #$# for span -> inline html and text.
+ #%# for div -> block level html and text.
+
+ -Linenumbers
+ Supports all styles. New token is called LINENUMBER.
+ Defaults to NAME if not defined.
+
+ Style options
+
+ -ALL markups support these text styles:
+ b = bold
+ i = italic
+ u = underline
+ -CSS and XHTML has limited support for borders:
+ HTML markup functions will ignore these.
+ Optional: Border color in RGB hex
+ Defaults to the text forecolor.
+ #rrggbb = border color
+ Border size:
+ l = thick
+ m = medium
+ t = thin
+ Border type:
+ - = dashed
+ . = dotted
+ s = solid
+ d = double
+ g = groove
+ r = ridge
+ n = inset
+ o = outset
+ You can specify multiple sides,
+ they will all use the same style.
+ Optional: Default is full border.
+ v = bottom
+ < = left
+ > = right
+ ^ = top
+ NOTE: Specify the styles you want.
+ The markups will ignore unsupported styles
+ Also note not all browsers can show these options
+
+ -All tokens default to NAME if not defined
+ so the only absolutely critical ones to define are:
+ NAME, ERRORTOKEN, PAGEBACKGROUND
+
+----------------------------------------------------------------------------
+
+Example usage::
+
+ # import
+ import PySourceColor as psc
+ psc.convert('c:/Python22/PySourceColor.py', colors=psc.idle, show=1)
+
+ # from module import *
+ from PySourceColor import *
+ convert('c:/Python22/Lib', colors=lite, markup="css",
+        header='#$#<b>This is a simple heading</b><hr/>')
+
+ # How to use a custom colorscheme, and most of the 'features'
+ from PySourceColor import *
+ new = {
+ ERRORTOKEN: ('bui','#FF8080',''),
+ DECORATOR_NAME: ('s','#AACBBC',''),
+ DECORATOR: ('n','#333333',''),
+ NAME: ('t.<v','#1133AA','#DDFF22'),
+ NUMBER: ('','#236676','#FF5555'),
+ OPERATOR: ('b','#454567','#BBBB11'),
+ MATH_OPERATOR: ('','#935623','#423afb'),
+ BRACKETS: ('b','#ac34bf','#6457a5'),
+ COMMENT: ('t-#0022FF','#545366','#AABBFF'),
+ DOUBLECOMMENT: ('<l#553455','#553455','#FF00FF'),
+ CLASS_NAME: ('m^v-','#000000','#FFFFFF'),
+ DEF_NAME: ('l=<v','#897845','#000022'),
+ KEYWORD: ('.b','#345345','#FFFF22'),
+ SINGLEQUOTE: ('mn','#223344','#AADDCC'),
+ SINGLEQUOTE_R: ('','#344522',''),
+ SINGLEQUOTE_U: ('','#234234',''),
+ DOUBLEQUOTE: ('m#0022FF','#334421',''),
+ DOUBLEQUOTE_R: ('','#345345',''),
+ DOUBLEQUOTE_U: ('','#678673',''),
+ TRIPLESINGLEQUOTE: ('tv','#FFFFFF','#000000'),
+ TRIPLESINGLEQUOTE_R: ('tbu','#443256','#DDFFDA'),
+ TRIPLESINGLEQUOTE_U: ('','#423454','#DDFFDA'),
+ TRIPLEDOUBLEQUOTE: ('li#236fd3b<>','#000000','#FFFFFF'),
+ TRIPLEDOUBLEQUOTE_R: ('tub','#000000','#FFFFFF'),
+ TRIPLEDOUBLEQUOTE_U: ('-', '#CCAABB','#FFFAFF'),
+        LINENUMBER: ('ib-','#ff66aa','#7733FF'),
+ TEXT: ('','#546634',''),
+ PAGEBACKGROUND: '#FFFAAA',
+ }
+ if __name__ == '__main__':
+ import sys
+ convert(sys.argv[1], './xhtml.html', colors=new, markup='xhtml', show=1,
+ linenumbers=1)
+ convert(sys.argv[1], './html.html', colors=new, markup='html', show=1,
+ linenumbers=1)
+
+"""
+
+__all__ = ['ERRORTOKEN','DECORATOR_NAME', 'DECORATOR', 'ARGS', 'EXTRASPACE',
+ 'NAME', 'NUMBER', 'OPERATOR', 'COMMENT', 'MATH_OPERATOR',
+ 'DOUBLECOMMENT', 'CLASS_NAME', 'DEF_NAME', 'KEYWORD', 'BRACKETS',
+ 'SINGLEQUOTE','SINGLEQUOTE_R','SINGLEQUOTE_U','DOUBLEQUOTE',
+ 'DOUBLEQUOTE_R', 'DOUBLEQUOTE_U', 'TRIPLESINGLEQUOTE', 'TEXT',
+ 'TRIPLESINGLEQUOTE_R', 'TRIPLESINGLEQUOTE_U', 'TRIPLEDOUBLEQUOTE',
+ 'TRIPLEDOUBLEQUOTE_R', 'TRIPLEDOUBLEQUOTE_U', 'PAGEBACKGROUND',
+ 'LINENUMBER', 'CODESTART', 'CODEEND', 'PY', 'TOKEN_NAMES', 'CSSHOOK',
+ 'null', 'mono', 'lite', 'dark','dark2', 'pythonwin','idle',
+ 'viewcvs', 'Usage', 'cli', 'str2stdout', 'path2stdout', 'Parser',
+ 'str2file', 'str2html', 'str2css', 'str2markup', 'path2file',
+ 'path2html', 'convert', 'walkdir', 'defaultColors', 'showpage',
+ 'pageconvert','tagreplace', 'MARKUPDICT']
+__title__ = 'PySourceColor'
+__version__ = "2.1a"
+__date__ = '25 April 2005'
+__author__ = "M.E.Farmer Jr."
+__credits__ = '''This was originally based on a python recipe
+submitted by Jürgen Hermann to ASPN. Now based on the voices in my head.
+M.E.Farmer 2004, 2005
+Python license
+'''
+import os
+import sys
+import time
+import glob
+import getopt
+import keyword
+import token
+import tokenize
+import traceback
+try :
+ import cStringIO as StringIO
+except:
+ import StringIO
+# Do not edit
+NAME = token.NAME
+NUMBER = token.NUMBER
+COMMENT = tokenize.COMMENT
+OPERATOR = token.OP
+ERRORTOKEN = token.ERRORTOKEN
+ARGS = token.NT_OFFSET + 1
+DOUBLECOMMENT = token.NT_OFFSET + 2
+CLASS_NAME = token.NT_OFFSET + 3
+DEF_NAME = token.NT_OFFSET + 4
+KEYWORD = token.NT_OFFSET + 5
+SINGLEQUOTE = token.NT_OFFSET + 6
+SINGLEQUOTE_R = token.NT_OFFSET + 7
+SINGLEQUOTE_U = token.NT_OFFSET + 8
+DOUBLEQUOTE = token.NT_OFFSET + 9
+DOUBLEQUOTE_R = token.NT_OFFSET + 10
+DOUBLEQUOTE_U = token.NT_OFFSET + 11
+TRIPLESINGLEQUOTE = token.NT_OFFSET + 12
+TRIPLESINGLEQUOTE_R = token.NT_OFFSET + 13
+TRIPLESINGLEQUOTE_U = token.NT_OFFSET + 14
+TRIPLEDOUBLEQUOTE = token.NT_OFFSET + 15
+TRIPLEDOUBLEQUOTE_R = token.NT_OFFSET + 16
+TRIPLEDOUBLEQUOTE_U = token.NT_OFFSET + 17
+PAGEBACKGROUND = token.NT_OFFSET + 18
+DECORATOR = token.NT_OFFSET + 19
+DECORATOR_NAME = token.NT_OFFSET + 20
+BRACKETS = token.NT_OFFSET + 21
+MATH_OPERATOR = token.NT_OFFSET + 22
+LINENUMBER = token.NT_OFFSET + 23
+TEXT = token.NT_OFFSET + 24
+PY = token.NT_OFFSET + 25
+CODESTART = token.NT_OFFSET + 26
+CODEEND = token.NT_OFFSET + 27
+CSSHOOK = token.NT_OFFSET + 28
+EXTRASPACE = token.NT_OFFSET + 29
+
+# markup classname lookup
+MARKUPDICT = {
+ ERRORTOKEN: 'py_err',
+ DECORATOR_NAME: 'py_decn',
+ DECORATOR: 'py_dec',
+ ARGS: 'py_args',
+ NAME: 'py_name',
+ NUMBER: 'py_num',
+ OPERATOR: 'py_op',
+ COMMENT: 'py_com',
+ DOUBLECOMMENT: 'py_dcom',
+ CLASS_NAME: 'py_clsn',
+ DEF_NAME: 'py_defn',
+ KEYWORD: 'py_key',
+ SINGLEQUOTE: 'py_sq',
+ SINGLEQUOTE_R: 'py_sqr',
+ SINGLEQUOTE_U: 'py_squ',
+ DOUBLEQUOTE: 'py_dq',
+ DOUBLEQUOTE_R: 'py_dqr',
+ DOUBLEQUOTE_U: 'py_dqu',
+ TRIPLESINGLEQUOTE: 'py_tsq',
+ TRIPLESINGLEQUOTE_R: 'py_tsqr',
+ TRIPLESINGLEQUOTE_U: 'py_tsqu',
+ TRIPLEDOUBLEQUOTE: 'py_tdq',
+ TRIPLEDOUBLEQUOTE_R: 'py_tdqr',
+ TRIPLEDOUBLEQUOTE_U: 'py_tdqu',
+ BRACKETS: 'py_bra',
+ MATH_OPERATOR: 'py_mop',
+ LINENUMBER: 'py_lnum',
+ TEXT: 'py_text',
+ }
+# might help users that want to create custom schemes
+TOKEN_NAMES= {
+ ERRORTOKEN:'ERRORTOKEN',
+ DECORATOR_NAME:'DECORATOR_NAME',
+ DECORATOR:'DECORATOR',
+ ARGS:'ARGS',
+ NAME:'NAME',
+ NUMBER:'NUMBER',
+ OPERATOR:'OPERATOR',
+ COMMENT:'COMMENT',
+ DOUBLECOMMENT:'DOUBLECOMMENT',
+ CLASS_NAME:'CLASS_NAME',
+ DEF_NAME:'DEF_NAME',
+ KEYWORD:'KEYWORD',
+ SINGLEQUOTE:'SINGLEQUOTE',
+ SINGLEQUOTE_R:'SINGLEQUOTE_R',
+ SINGLEQUOTE_U:'SINGLEQUOTE_U',
+ DOUBLEQUOTE:'DOUBLEQUOTE',
+ DOUBLEQUOTE_R:'DOUBLEQUOTE_R',
+ DOUBLEQUOTE_U:'DOUBLEQUOTE_U',
+ TRIPLESINGLEQUOTE:'TRIPLESINGLEQUOTE',
+ TRIPLESINGLEQUOTE_R:'TRIPLESINGLEQUOTE_R',
+ TRIPLESINGLEQUOTE_U:'TRIPLESINGLEQUOTE_U',
+ TRIPLEDOUBLEQUOTE:'TRIPLEDOUBLEQUOTE',
+ TRIPLEDOUBLEQUOTE_R:'TRIPLEDOUBLEQUOTE_R',
+ TRIPLEDOUBLEQUOTE_U:'TRIPLEDOUBLEQUOTE_U',
+ BRACKETS:'BRACKETS',
+ MATH_OPERATOR:'MATH_OPERATOR',
+ LINENUMBER:'LINENUMBER',
+ TEXT:'TEXT',
+ PAGEBACKGROUND:'PAGEBACKGROUND',
+ }
+
+######################################################################
+# Edit colors and styles to taste
+# Create your own scheme, just copy one below , rename and edit.
+# Custom styles must at least define NAME, ERRORTOKEN, PAGEBACKGROUND,
+# all missing elements will default to NAME.
+# See module docstring for details on style attributes.
+######################################################################
+# Copy null and use it as a starter colorscheme.
+null = {# tokentype: ('tags border_color', 'textforecolor', 'textbackcolor')
+ ERRORTOKEN: ('','#000000',''),# Error token
+ DECORATOR_NAME: ('','#000000',''),# Decorator name
+ DECORATOR: ('','#000000',''),# @ symbol
+ ARGS: ('','#000000',''),# class,def,deco arguments
+ NAME: ('','#000000',''),# All other python text
+ NUMBER: ('','#000000',''),# 0->10
+ OPERATOR: ('','#000000',''),# ':','<=',';',',','.','==', etc
+ MATH_OPERATOR: ('','#000000',''),# '+','-','=','','**',etc
+ BRACKETS: ('','#000000',''),# '[',']','(',')','{','}'
+ COMMENT: ('','#000000',''),# Single comment
+ DOUBLECOMMENT: ('','#000000',''),## Double comment
+ CLASS_NAME: ('','#000000',''),# Class name
+ DEF_NAME: ('','#000000',''),# Def name
+ KEYWORD: ('','#000000',''),# Python keywords
+ SINGLEQUOTE: ('','#000000',''),# 'SINGLEQUOTE'
+ SINGLEQUOTE_R: ('','#000000',''),# r'SINGLEQUOTE'
+ SINGLEQUOTE_U: ('','#000000',''),# u'SINGLEQUOTE'
+ DOUBLEQUOTE: ('','#000000',''),# "DOUBLEQUOTE"
+ DOUBLEQUOTE_R: ('','#000000',''),# r"DOUBLEQUOTE"
+ DOUBLEQUOTE_U: ('','#000000',''),# u"DOUBLEQUOTE"
+ TRIPLESINGLEQUOTE: ('','#000000',''),# '''TRIPLESINGLEQUOTE'''
+ TRIPLESINGLEQUOTE_R: ('','#000000',''),# r'''TRIPLESINGLEQUOTE'''
+ TRIPLESINGLEQUOTE_U: ('','#000000',''),# u'''TRIPLESINGLEQUOTE'''
+ TRIPLEDOUBLEQUOTE: ('','#000000',''),# """TRIPLEDOUBLEQUOTE"""
+ TRIPLEDOUBLEQUOTE_R: ('','#000000',''),# r"""TRIPLEDOUBLEQUOTE"""
+ TRIPLEDOUBLEQUOTE_U: ('','#000000',''),# u"""TRIPLEDOUBLEQUOTE"""
+ TEXT: ('','#000000',''),# non python text
+ LINENUMBER: ('>ti#555555','#000000',''),# Linenumbers
+ PAGEBACKGROUND: '#FFFFFF'# set the page background
+ }
+
+mono = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('bu','#000000',''),
+ DECORATOR: ('b','#000000',''),
+ ARGS: ('b','#555555',''),
+ NAME: ('','#000000',''),
+ NUMBER: ('b','#000000',''),
+ OPERATOR: ('b','#000000',''),
+ MATH_OPERATOR: ('b','#000000',''),
+ BRACKETS: ('b','#000000',''),
+ COMMENT: ('i','#999999',''),
+ DOUBLECOMMENT: ('b','#999999',''),
+ CLASS_NAME: ('bu','#000000',''),
+ DEF_NAME: ('b','#000000',''),
+ KEYWORD: ('b','#000000',''),
+ SINGLEQUOTE: ('','#000000',''),
+ SINGLEQUOTE_R: ('','#000000',''),
+ SINGLEQUOTE_U: ('','#000000',''),
+ DOUBLEQUOTE: ('','#000000',''),
+ DOUBLEQUOTE_R: ('','#000000',''),
+ DOUBLEQUOTE_U: ('','#000000',''),
+ TRIPLESINGLEQUOTE: ('','#000000',''),
+ TRIPLESINGLEQUOTE_R: ('','#000000',''),
+ TRIPLESINGLEQUOTE_U: ('','#000000',''),
+ TRIPLEDOUBLEQUOTE: ('i','#000000',''),
+ TRIPLEDOUBLEQUOTE_R: ('i','#000000',''),
+ TRIPLEDOUBLEQUOTE_U: ('i','#000000',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+dark = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('b','#FFBBAA',''),
+ DECORATOR: ('b','#CC5511',''),
+ ARGS: ('b','#DDDDFF',''),
+ NAME: ('','#DDDDDD',''),
+ NUMBER: ('','#FF0000',''),
+ OPERATOR: ('b','#FAF785',''),
+ MATH_OPERATOR: ('b','#FAF785',''),
+ BRACKETS: ('b','#FAF785',''),
+ COMMENT: ('','#45FCA0',''),
+ DOUBLECOMMENT: ('i','#A7C7A9',''),
+ CLASS_NAME: ('b','#B666FD',''),
+ DEF_NAME: ('b','#EBAE5C',''),
+ KEYWORD: ('b','#8680FF',''),
+ SINGLEQUOTE: ('','#F8BAFE',''),
+ SINGLEQUOTE_R: ('','#F8BAFE',''),
+ SINGLEQUOTE_U: ('','#F8BAFE',''),
+ DOUBLEQUOTE: ('','#FF80C0',''),
+ DOUBLEQUOTE_R: ('','#FF80C0',''),
+ DOUBLEQUOTE_U: ('','#FF80C0',''),
+ TRIPLESINGLEQUOTE: ('','#FF9595',''),
+ TRIPLESINGLEQUOTE_R: ('','#FF9595',''),
+ TRIPLESINGLEQUOTE_U: ('','#FF9595',''),
+ TRIPLEDOUBLEQUOTE: ('','#B3FFFF',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#B3FFFF',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#B3FFFF',''),
+ TEXT: ('','#FFFFFF',''),
+ LINENUMBER: ('>mi#555555','#bbccbb','#333333'),
+ PAGEBACKGROUND: '#000000'
+ }
+
+dark2 = {
+ ERRORTOKEN: ('','#FF0000',''),
+ DECORATOR_NAME: ('b','#FFBBAA',''),
+ DECORATOR: ('b','#CC5511',''),
+ ARGS: ('b','#DDDDDD',''),
+ NAME: ('','#C0C0C0',''),
+ NUMBER: ('b','#00FF00',''),
+ OPERATOR: ('b','#FF090F',''),
+ MATH_OPERATOR: ('b','#EE7020',''),
+ BRACKETS: ('b','#FFB90F',''),
+ COMMENT: ('i','#D0D000','#522000'),#'#88AA88','#11111F'),
+ DOUBLECOMMENT: ('i','#D0D000','#522000'),#'#77BB77','#11111F'),
+ CLASS_NAME: ('b','#DD4080',''),
+ DEF_NAME: ('b','#FF8040',''),
+ KEYWORD: ('b','#4726d1',''),
+ SINGLEQUOTE: ('','#8080C0',''),
+ SINGLEQUOTE_R: ('','#8080C0',''),
+ SINGLEQUOTE_U: ('','#8080C0',''),
+ DOUBLEQUOTE: ('','#ADB9F1',''),
+ DOUBLEQUOTE_R: ('','#ADB9F1',''),
+ DOUBLEQUOTE_U: ('','#ADB9F1',''),
+ TRIPLESINGLEQUOTE: ('','#00C1C1',''),#A050C0
+ TRIPLESINGLEQUOTE_R: ('','#00C1C1',''),#A050C0
+ TRIPLESINGLEQUOTE_U: ('','#00C1C1',''),#A050C0
+ TRIPLEDOUBLEQUOTE: ('','#33E3E3',''),#B090E0
+ TRIPLEDOUBLEQUOTE_R: ('','#33E3E3',''),#B090E0
+ TRIPLEDOUBLEQUOTE_U: ('','#33E3E3',''),#B090E0
+ TEXT: ('','#C0C0C0',''),
+ LINENUMBER: ('>mi#555555','#bbccbb','#333333'),
+ PAGEBACKGROUND: '#000000'
+ }
+
+lite = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('b','#BB4422',''),
+ DECORATOR: ('b','#3333AF',''),
+ ARGS: ('b','#000000',''),
+ NAME: ('','#333333',''),
+ NUMBER: ('b','#DD2200',''),
+ OPERATOR: ('b','#000000',''),
+ MATH_OPERATOR: ('b','#000000',''),
+ BRACKETS: ('b','#000000',''),
+ COMMENT: ('','#007F00',''),
+ DOUBLECOMMENT: ('','#608060',''),
+ CLASS_NAME: ('b','#0000DF',''),
+ DEF_NAME: ('b','#9C7A00',''),#f09030
+ KEYWORD: ('b','#0000AF',''),
+ SINGLEQUOTE: ('','#600080',''),
+ SINGLEQUOTE_R: ('','#600080',''),
+ SINGLEQUOTE_U: ('','#600080',''),
+ DOUBLEQUOTE: ('','#A0008A',''),
+ DOUBLEQUOTE_R: ('','#A0008A',''),
+ DOUBLEQUOTE_U: ('','#A0008A',''),
+ TRIPLESINGLEQUOTE: ('','#337799',''),
+ TRIPLESINGLEQUOTE_R: ('','#337799',''),
+ TRIPLESINGLEQUOTE_U: ('','#337799',''),
+ TRIPLEDOUBLEQUOTE: ('','#1166AA',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#1166AA',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#1166AA',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+idle = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('','#900090',''),
+ DECORATOR: ('','#FF7700',''),
+ NAME: ('','#000000',''),
+ NUMBER: ('','#000000',''),
+ OPERATOR: ('','#000000',''),
+ MATH_OPERATOR: ('','#000000',''),
+ BRACKETS: ('','#000000',''),
+ COMMENT: ('','#DD0000',''),
+ DOUBLECOMMENT: ('','#DD0000',''),
+ CLASS_NAME: ('','#0000FF',''),
+ DEF_NAME: ('','#0000FF',''),
+ KEYWORD: ('','#FF7700',''),
+ SINGLEQUOTE: ('','#00AA00',''),
+ SINGLEQUOTE_R: ('','#00AA00',''),
+ SINGLEQUOTE_U: ('','#00AA00',''),
+ DOUBLEQUOTE: ('','#00AA00',''),
+ DOUBLEQUOTE_R: ('','#00AA00',''),
+ DOUBLEQUOTE_U: ('','#00AA00',''),
+ TRIPLESINGLEQUOTE: ('','#00AA00',''),
+ TRIPLESINGLEQUOTE_R: ('','#00AA00',''),
+ TRIPLESINGLEQUOTE_U: ('','#00AA00',''),
+ TRIPLEDOUBLEQUOTE: ('','#00AA00',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#00AA00',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#00AA00',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+pythonwin = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('b','#DD0080',''),
+ DECORATOR: ('b','#000080',''),
+ ARGS: ('','#000000',''),
+ NAME: ('','#303030',''),
+ NUMBER: ('','#008080',''),
+ OPERATOR: ('','#000000',''),
+ MATH_OPERATOR: ('','#000000',''),
+ BRACKETS: ('','#000000',''),
+ COMMENT: ('','#007F00',''),
+ DOUBLECOMMENT: ('','#7F7F7F',''),
+ CLASS_NAME: ('b','#0000FF',''),
+ DEF_NAME: ('b','#007F7F',''),
+ KEYWORD: ('b','#000080',''),
+ SINGLEQUOTE: ('','#808000',''),
+ SINGLEQUOTE_R: ('','#808000',''),
+ SINGLEQUOTE_U: ('','#808000',''),
+ DOUBLEQUOTE: ('','#808000',''),
+ DOUBLEQUOTE_R: ('','#808000',''),
+ DOUBLEQUOTE_U: ('','#808000',''),
+ TRIPLESINGLEQUOTE: ('','#808000',''),
+ TRIPLESINGLEQUOTE_R: ('','#808000',''),
+ TRIPLESINGLEQUOTE_U: ('','#808000',''),
+ TRIPLEDOUBLEQUOTE: ('','#808000',''),
+ TRIPLEDOUBLEQUOTE_R: ('','#808000',''),
+ TRIPLEDOUBLEQUOTE_U: ('','#808000',''),
+ TEXT: ('','#303030',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+viewcvs = {
+ ERRORTOKEN: ('s#FF0000','#FF8080',''),
+ DECORATOR_NAME: ('','#000000',''),
+ DECORATOR: ('','#000000',''),
+ ARGS: ('','#000000',''),
+ NAME: ('','#000000',''),
+ NUMBER: ('','#000000',''),
+ OPERATOR: ('','#000000',''),
+ MATH_OPERATOR: ('','#000000',''),
+ BRACKETS: ('','#000000',''),
+ COMMENT: ('i','#b22222',''),
+ DOUBLECOMMENT: ('i','#b22222',''),
+ CLASS_NAME: ('','#000000',''),
+ DEF_NAME: ('b','#0000ff',''),
+ KEYWORD: ('b','#a020f0',''),
+ SINGLEQUOTE: ('b','#bc8f8f',''),
+ SINGLEQUOTE_R: ('b','#bc8f8f',''),
+ SINGLEQUOTE_U: ('b','#bc8f8f',''),
+ DOUBLEQUOTE: ('b','#bc8f8f',''),
+ DOUBLEQUOTE_R: ('b','#bc8f8f',''),
+ DOUBLEQUOTE_U: ('b','#bc8f8f',''),
+ TRIPLESINGLEQUOTE: ('b','#bc8f8f',''),
+ TRIPLESINGLEQUOTE_R: ('b','#bc8f8f',''),
+ TRIPLESINGLEQUOTE_U: ('b','#bc8f8f',''),
+ TRIPLEDOUBLEQUOTE: ('b','#bc8f8f',''),
+ TRIPLEDOUBLEQUOTE_R: ('b','#bc8f8f',''),
+ TRIPLEDOUBLEQUOTE_U: ('b','#bc8f8f',''),
+ TEXT: ('','#000000',''),
+ LINENUMBER: ('>ti#555555','#000000',''),
+ PAGEBACKGROUND: '#FFFFFF'
+ }
+
+defaultColors = lite
+
+def Usage():
+ doc = """
+ -----------------------------------------------------------------------------
+ PySourceColor.py ver: %s
+ -----------------------------------------------------------------------------
+ Module summary:
+ This module is designed to colorize python source code.
+ Input--->python source
+ Output-->colorized (html, html4.01/css, xhtml1.0)
+ Standalone:
+ This module will work from the command line with options.
+ This module will work with redirected stdio.
+ Imported:
+ This module can be imported and used directly in your code.
+ -----------------------------------------------------------------------------
+ Command line options:
+ -h, --help
+ Optional-> Display this help message.
+ -t, --test
+ Optional-> Will ignore all others flags but --profile
+ test all schemes and markup combinations
+ -p, --profile
+ Optional-> Works only with --test or -t
+ runs profile.py and makes the test work in quiet mode.
+ -i, --in, --input
+ Optional-> If you give input on stdin.
+ Use any of these for the current dir (.,cwd)
+ Input can be file or dir.
+ Input from stdin use one of the following (-,stdin)
+ If stdin is used as input stdout is output unless specified.
+ -o, --out, --output
+ Optional-> output dir for the colorized source.
+ default: output dir is the input dir.
+ To output html to stdout use one of the following (-,stdout)
+ Stdout can be used without stdin if you give a file as input.
+ -c, --color
+ Optional-> null, mono, dark, dark2, lite, idle, pythonwin, viewcvs
+ default: dark
+ -s, --show
+ Optional-> Show page after creation.
+ default: no show
+ -m, --markup
+ Optional-> html, css, xhtml
+ css, xhtml also support external stylesheets (-e,--external)
+ default: HTML
+ -e, --external
+ Optional-> use with css, xhtml
+            Writes a style sheet instead of embedding it in the page
+ saves it as pystyle.css in the same directory.
+ html markup will silently ignore this flag.
+ -H, --header
+            Optional-> add a page header to the top of the output
+ -H
+ Builtin header (name,date,hrule)
+ --header
+ You must specify a filename.
+ The header file must be valid html
+ and must handle its own font colors.
+ ex. --header c:/tmp/header.txt
+ -F, --footer
+            Optional-> add a page footer to the bottom of the output
+ -F
+ Builtin footer (hrule,name,date)
+ --footer
+ You must specify a filename.
+ The footer file must be valid html
+ and must handle its own font colors.
+ ex. --footer c:/tmp/footer.txt
+ -l, --linenumbers
+ Optional-> default is no linenumbers
+ Adds line numbers to the start of each line in the code.
+ --convertpage
+ Given a webpage that has code embedded in tags it will
+ convert embedded code to colorized html.
+ (see pageconvert for details)
+ -----------------------------------------------------------------------------
+ Option usage:
+ # Test and show pages
+ python PySourceColor.py -t -s
+ # Test and only show profile results
+ python PySourceColor.py -t -p
+ # Colorize all .py,.pyw files in cwdir you can also use: (.,cwd)
+ python PySourceColor.py -i .
+ # Using long options w/ =
+ python PySourceColor.py --in=c:/myDir/my.py --color=lite --show
+ # Using short options w/out =
+ python PySourceColor.py -i c:/myDir/ -c idle -m css -e
+ # Using any mix
+ python PySourceColor.py --in . -o=c:/myDir --show
+ # Place a custom header on your files
+ python PySourceColor.py -i . -o c:/tmp -m xhtml --header c:/header.txt
+ -----------------------------------------------------------------------------
+ Stdio usage:
+ # Stdio using no options
+ python PySourceColor.py < c:/MyFile.py > c:/tmp/MyFile.html
+ # Using stdin alone automatically uses stdout for output: (stdin,-)
+ python PySourceColor.py -i- < c:/MyFile.py > c:/tmp/myfile.html
+ # Stdout can also be written to directly from a file instead of stdin
+ python PySourceColor.py -i c:/MyFile.py -m css -o- > c:/tmp/myfile.html
+ # Stdin can be used as input , but output can still be specified
+ python PySourceColor.py -i- -o c:/pydoc.py.html -s < c:/Python22/my.py
+ _____________________________________________________________________________
+ """
+ print doc % (__version__)
+ sys.exit(1)
+
+###################################################### Command line interface
+
+def cli():
+ """Handle command line args and redirections"""
+ try:
+ # try to get command line args
+ opts, args = getopt.getopt(sys.argv[1:],
+ "hseqtplHFi:o:c:m:h:f:",["help", "show", "quiet",
+ "test", "external", "linenumbers", "convertpage", "profile",
+ "input=", "output=", "color=", "markup=","header=", "footer="])
+ except getopt.GetoptError:
+ # on error print help information and exit:
+ Usage()
+ # init some names
+ input = None
+ output = None
+ colorscheme = None
+ markup = 'html'
+ header = None
+ footer = None
+ linenumbers = 0
+ show = 0
+ quiet = 0
+ test = 0
+ profile = 0
+ convertpage = 0
+ form = None
+ # if we have args then process them
+ for o, a in opts:
+ if o in ["-h", "--help"]:
+ Usage()
+ sys.exit()
+ if o in ["-o", "--output", "--out"]:
+ output = a
+ if o in ["-i", "--input", "--in"]:
+ input = a
+ if input in [".", "cwd"]:
+ input = os.getcwd()
+ if o in ["-s", "--show"]:
+ show = 1
+ if o in ["-q", "--quiet"]:
+ quiet = 1
+ if o in ["-t", "--test"]:
+ test = 1
+ if o in ["--convertpage"]:
+ convertpage = 1
+ if o in ["-p", "--profile"]:
+ profile = 1
+ if o in ["-e", "--external"]:
+ form = 'external'
+ if o in ["-m", "--markup"]:
+ markup = str(a)
+ if o in ["-l", "--linenumbers"]:
+ linenumbers = 1
+ if o in ["--header"]:
+ header = str(a)
+ elif o == "-H":
+ header = ''
+ if o in ["--footer"]:
+ footer = str(a)
+ elif o == "-F":
+ footer = ''
+ if o in ["-c", "--color"]:
+ try:
+ colorscheme = globals().get(a.lower())
+ except:
+ traceback.print_exc()
+ Usage()
+ if test:
+ if profile:
+ import profile
+ profile.run('_test(show=%s, quiet=%s)'%(show,quiet))
+ else:
+ # Parse this script in every possible colorscheme and markup
+ _test(show,quiet)
+ elif input in [None, "-", "stdin"] or output in ["-", "stdout"]:
+ # determine if we are going to use stdio
+ if input not in [None, "-", "stdin"]:
+ if os.path.isfile(input) :
+ path2stdout(input, colors=colorscheme, markup=markup,
+ linenumbers=linenumbers, header=header,
+ footer=footer, form=form)
+ else:
+ raise PathError, 'File does not exists!'
+ else:
+ try:
+ if sys.stdin.isatty():
+ raise InputError, 'Please check input!'
+ else:
+ if output in [None,"-","stdout"]:
+ str2stdout(sys.stdin.read(), colors=colorscheme,
+ markup=markup, header=header,
+ footer=footer, linenumbers=linenumbers,
+ form=form)
+ else:
+ str2file(sys.stdin.read(), outfile=output, show=show,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers, form=form)
+ except:
+ traceback.print_exc()
+ Usage()
+ else:
+ if os.path.exists(input):
+ if convertpage:
+ # if there was at least an input given we can proceed
+ pageconvert(input, out=output, colors=colorscheme,
+ show=show, markup=markup,linenumbers=linenumbers)
+ else:
+ # if there was at least an input given we can proceed
+ convert(source=input, outdir=output, colors=colorscheme,
+ show=show, markup=markup, quiet=quiet, header=header,
+ footer=footer, linenumbers=linenumbers, form=form)
+ else:
+ raise PathError, 'File does not exists!'
+ Usage()
+
+######################################################### Simple markup tests
+
+def _test(show=0, quiet=0):
+ """Test the parser and most of the functions.
+
+    There are 19 tests total (eight colorschemes in three different markups,
+ and a str2file test. Most functions are tested by this.
+ """
+ fi = sys.argv[0]
+ if not fi.endswith('.exe'):# Do not test if frozen as an archive
+ # this is a collection of test, most things are covered.
+ path2file(fi, '/tmp/null.html', null, show=show, quiet=quiet)
+ path2file(fi, '/tmp/null_css.html', null, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/mono.html', mono, show=show, quiet=quiet)
+ path2file(fi, '/tmp/mono_css.html', mono, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/lite.html', lite, show=show, quiet=quiet)
+ path2file(fi, '/tmp/lite_css.html', lite, show=show,
+ markup='css', quiet=quiet, header='', footer='',
+ linenumbers=1)
+ path2file(fi, '/tmp/lite_xhtml.html', lite, show=show,
+ markup='xhtml', quiet=quiet)
+ path2file(fi, '/tmp/dark.html', dark, show=show, quiet=quiet)
+ path2file(fi, '/tmp/dark_css.html', dark, show=show,
+ markup='css', quiet=quiet, linenumbers=1)
+ path2file(fi, '/tmp/dark2.html', dark2, show=show, quiet=quiet)
+ path2file(fi, '/tmp/dark2_css.html', dark2, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/dark2_xhtml.html', dark2, show=show,
+ markup='xhtml', quiet=quiet, header='', footer='',
+ linenumbers=1, form='external')
+ path2file(fi, '/tmp/idle.html', idle, show=show, quiet=quiet)
+ path2file(fi, '/tmp/idle_css.html', idle, show=show,
+ markup='css', quiet=quiet)
+ path2file(fi, '/tmp/viewcvs.html', viewcvs, show=show,
+ quiet=quiet, linenumbers=1)
+ path2file(fi, '/tmp/viewcvs_css.html', viewcvs, show=show,
+ markup='css', linenumbers=1, quiet=quiet)
+ path2file(fi, '/tmp/pythonwin.html', pythonwin, show=show,
+ quiet=quiet)
+ path2file(fi, '/tmp/pythonwin_css.html', pythonwin, show=show,
+ markup='css', quiet=quiet)
+ teststr=r'''"""This is a test of decorators and other things"""
+# This should be line 421...
+@whatever(arg,arg2)
+@A @B(arghh) @C
+def LlamaSaysNi(arg='Ni!',arg2="RALPH"):
+ """This docstring is deeply disturbed by all the llama references"""
+ print '%s The Wonder Llama says %s'% (arg2,arg)
+# So I was like duh!, and he was like ya know?!,
+# and so we were both like huh...wtf!? RTFM!! LOL!!;)
+@staticmethod## Double comments are KewL.
+def LlamasRLumpy():
+ """This docstring is too sexy to be here.
+ """
+ u"""
+=============================
+A Møøse once bit my sister...
+=============================
+ """
+ ## Relax, this won't hurt a bit, just a simple, painless procedure,
+ ## hold still while I get the anesthetizing hammer.
+ m = {'three':'1','won':'2','too':'3'}
+ o = r'fishy\fishy\fishy/fish\oh/where/is\my/little\..'
+ python = uR"""
+ No realli! She was Karving her initials øn the møøse with the sharpened end
+ of an interspace tøøthbrush given her by Svenge - her brother-in-law -an Oslo
+ dentist and star of many Norwegian møvies: "The Høt Hands of an Oslo
+ Dentist", "Fillings of Passion", "The Huge Mølars of Horst Nordfink"..."""
+ RU"""142 MEXICAN WHOOPING LLAMAS"""#<-Can you fit 142 llamas in a red box?
+ n = u' HERMSGERVØRDENBRØTBØRDA ' + """ YUTTE """
+ t = """SAMALLNIATNUOMNAIRODAUCE"""+"DENIARTYLLAICEPS04"
+ ## We apologise for the fault in the
+ ## comments. Those responsible have been
+ ## sacked.
+ y = '14 NORTH CHILEAN GUANACOS \
+(CLOSELY RELATED TO THE LLAMA)'
+ rules = [0,1,2,3,4,5]
+ print y'''
+ htmlPath = os.path.abspath('/tmp/strtest_lines.html')
+ str2file(teststr, htmlPath, colors=dark, markup='xhtml',
+ linenumbers=420, show=show)
+ _printinfo(" wrote %s" % htmlPath, quiet)
+ htmlPath = os.path.abspath('/tmp/strtest_nolines.html')
+ str2file(teststr, htmlPath, colors=dark, markup='xhtml',
+ show=show)
+ _printinfo(" wrote %s" % htmlPath, quiet)
+ else:
+ Usage()
+ return
+
+# emacs wants this: '
+
+####################################################### User functions
+
+def str2stdout(sourcestring, colors=None, title='', markup='html',
+ header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts a code(string) to colorized HTML. Writes to stdout.
+
+ form='code',or'snip' (for "<pre>yourcode</pre>" only)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ Parser(sourcestring, colors=colors, title=title, markup=markup,
+ header=header, footer=footer,
+ linenumbers=linenumbers).format(form)
+
+def path2stdout(sourcepath, title='', colors=None, markup='html',
+ header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts code(file) to colorized HTML. Writes to stdout.
+
+ form='code',or'snip' (for "<pre>yourcode</pre>" only)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ sourcestring = open(sourcepath).read()
+ Parser(sourcestring, colors=colors, title=sourcepath,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers).format(form)
+
+def str2html(sourcestring, colors=None, title='',
+ markup='html', header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts a code(string) to colorized HTML. Returns an HTML string.
+
+ form='code',or'snip' (for "<pre>yourcode</pre>" only)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ stringIO = StringIO.StringIO()
+ Parser(sourcestring, colors=colors, title=title, out=stringIO,
+ markup=markup, header=header, footer=footer,
+ linenumbers=linenumbers).format(form)
+ stringIO.seek(0)
+ return stringIO.read()
+
+def str2css(sourcestring, colors=None, title='',
+ markup='css', header=None, footer=None,
+ linenumbers=0, form=None):
+ """Converts a code string to colorized CSS/HTML. Returns CSS/HTML string
+
+ If form != None then this will return (stylesheet_str, code_str)
+ colors=null,mono,lite,dark,dark2,idle,or pythonwin
+ """
+ if markup.lower() not in ['css' ,'xhtml']:
+ markup = 'css'
+ stringIO = StringIO.StringIO()
+ parse = Parser(sourcestring, colors=colors, title=title,
+ out=stringIO, markup=markup,
+ header=header, footer=footer,
+ linenumbers=linenumbers)
+ parse.format(form)
+ stringIO.seek(0)
+ if form != None:
+ return parse._sendCSSStyle(external=1), stringIO.read()
+ else:
+ return None, stringIO.read()
+
def str2markup(sourcestring, colors=None, title = '',
               markup='xhtml', header=None, footer=None,
               linenumbers=0, form=None):
    """ Convert code strings into ([stylesheet or None], colorized string) """
    # html markup embeds all styling inline, so there is never a separate
    # stylesheet in that case; css/xhtml go through str2css instead.
    if markup.lower() == 'html':
        return None, str2html(sourcestring, colors=colors, title=title,
                              header=header, footer=footer, markup=markup,
                              linenumbers=linenumbers, form=form)
    else:
        return str2css(sourcestring, colors=colors, title=title,
                       header=header, footer=footer, markup=markup,
                       linenumbers=linenumbers, form=form)
+
def str2file(sourcestring, outfile, colors=None, title='',
             markup='html', header=None, footer=None,
             linenumbers=0, show=0, dosheet=1, form=None):
    """Converts a code string to a file.

    makes no attempt at correcting bad pathnames

    sourcestring : python source text to colorize
    outfile      : path of the html file to write
    dosheet      : if true and a stylesheet was produced, write it as
                   pystyle.css next to outfile
    show         : if true, open the result in the default web browser
    """
    # Bug fix: honor the caller-supplied title (it was hard-coded to '').
    css , html = str2markup(sourcestring, colors=colors, title=title,
                            markup=markup, header=header, footer=footer,
                            linenumbers=linenumbers, form=form)
    # write html; try/finally so the handle is closed even if the write fails
    f = open(outfile,'wt')
    try:
        f.writelines(html)
    finally:
        f.close()
    #write css
    if css != None and dosheet:
        dir = os.path.dirname(outfile)
        outcss = os.path.join(dir,'pystyle.css')
        f = open(outcss,'wt')
        try:
            f.writelines(css)
        finally:
            f.close()
    if show:
        showpage(outfile)
+
def path2html(sourcepath, colors=None, markup='html',
              header=None, footer=None,
              linenumbers=0, form=None):
    """Converts code(file) to colorized HTML. Returns an HTML string.

       form='code',or'snip' (for "<pre>yourcode</pre>" only)
       colors=null,mono,lite,dark,dark2,idle,or pythonwin
    """
    # Read the whole file, colorize into a memory buffer, return its text.
    # The file's path doubles as the page title.
    stringIO = StringIO.StringIO()
    sourcestring = open(sourcepath).read()
    Parser(sourcestring, colors, title=sourcepath, out=stringIO,
           markup=markup, header=header, footer=footer,
           linenumbers=linenumbers).format(form)
    stringIO.seek(0)
    return stringIO.read()
+
def convert(source, outdir=None, colors=None,
            show=0, markup='html', quiet=0,
            header=None, footer=None, linenumbers=0, form=None):
    """Takes a file or dir as input and places the html in the outdir.

    If outdir is none it defaults to the input dir
    """
    count=0
    # If it is a filename then path2file
    if not os.path.isdir(source):
        if os.path.isfile(source):
            count+=1
            path2file(source, outdir, colors, show, markup,
                      quiet, form, header, footer, linenumbers, count)
        else:
            raise PathError, 'File does not exist!'
    # If we pass in a dir we need to walkdir for files.
    # Then we need to colorize them with path2file
    else:
        fileList = walkdir(source)
        if fileList != None:
            # make sure outdir is a dir
            if outdir != None:
                # a path with an extension was given: strip to its directory
                if os.path.splitext(outdir)[1] != '':
                    outdir = os.path.split(outdir)[0]
            for item in fileList:
                count+=1
                path2file(item, outdir, colors, show, markup,
                          quiet, form, header, footer, linenumbers, count)
            _printinfo('Completed colorizing %s files.'%str(count), quiet)
        else:
            _printinfo("No files to convert in dir.", quiet)
+
def path2file(sourcePath, out=None, colors=None, show=0,
              markup='html', quiet=0, form=None,
              header=None, footer=None, linenumbers=0, count=1):
    """ Converts python source to html file"""
    # If no outdir is given we use the sourcePath
    if out == None:#this is a guess
        htmlPath = sourcePath + '.html'
    else:
        # If we do give an out_dir, and it does
        # not exist , it will be created.
        if os.path.splitext(out)[1] == '':
            if not os.path.isdir(out):
                os.makedirs(out)
            sourceName = os.path.basename(sourcePath)
            htmlPath = os.path.join(out,sourceName)+'.html'
        # If we do give an out_name, and its dir does
        # not exist , it will be created.
        else:
            outdir = os.path.split(out)[0]
            if not os.path.isdir(outdir):
                os.makedirs(outdir)
            htmlPath = out
    htmlPath = os.path.abspath(htmlPath)
    # Open the text and do the parsing.
    # NOTE(review): the output handle passed to Parser is never explicitly
    # closed; it is left to garbage collection.
    source = open(sourcePath).read()
    parse = Parser(source, colors, sourcePath, open(htmlPath, 'wt'),
                   markup, header, footer, linenumbers)
    parse.format(form)
    _printinfo(" wrote %s" % htmlPath, quiet)
    # html markup will ignore the external flag, but
    # we need to stop the blank file from being written.
    if form == 'external' and count == 1 and markup != 'html':
        cssSheet = parse._sendCSSStyle(external=1)
        cssPath = os.path.join(os.path.dirname(htmlPath),'pystyle.css')
        css = open(cssPath, 'wt')
        css.write(cssSheet)
        css.close()
        _printinfo(" wrote %s" % cssPath, quiet)
    if show:
        # load HTML page into the default web browser.
        showpage(htmlPath)
    return htmlPath
+
+def tagreplace(sourcestr, colors=lite, markup='xhtml',
+ linenumbers=0, dosheet=1, tagstart='<PY>'.lower(),
+ tagend='</PY>'.lower(), stylesheet='pystyle.css'):
+ """This is a helper function for pageconvert. Returns css, page.
+ """
+ if markup.lower() != 'html':
+ link = '<link rel="stylesheet" href="%s" type="text/css"/></head>'
+ css = link%stylesheet
+ if sourcestr.find(css) == -1:
+ sourcestr = sourcestr.replace('</head>', css, 1)
+ starttags = sourcestr.count(tagstart)
+ endtags = sourcestr.count(tagend)
+ if starttags:
+ if starttags == endtags:
+ for _ in range(starttags):
+ datastart = sourcestr.find(tagstart)
+ dataend = sourcestr.find(tagend)
+ data = sourcestr[datastart+len(tagstart):dataend]
+ data = unescape(data)
+ css , data = str2markup(data, colors=colors,
+ linenumbers=linenumbers, markup=markup, form='embed')
+ start = sourcestr[:datastart]
+ end = sourcestr[dataend+len(tagend):]
+ sourcestr = ''.join([start,data,end])
+ else:
+ raise InputError,'Tag mismatch!\nCheck %s,%s tags'%tagstart,tagend
+ if not dosheet:
+ css = None
+ return css, sourcestr
+
def pageconvert(path, out=None, colors=lite, markup='xhtml', linenumbers=0,
                dosheet=1, tagstart='<PY>'.lower(), tagend='</PY>'.lower(),
                stylesheet='pystyle', show=1, returnstr=0):
    """This function can colorize Python source

    that is written in a webpage enclosed in tags.
    """
    if out == None:
        out = os.path.dirname(path)
    infile = open(path, 'r').read()
    css,page = tagreplace(sourcestr=infile,colors=colors,
                          markup=markup, linenumbers=linenumbers, dosheet=dosheet,
                          tagstart=tagstart, tagend=tagend, stylesheet=stylesheet)
    if not returnstr:
        # write the converted page (and stylesheet) under out/tmp/
        newpath = os.path.abspath(os.path.join(
            out,'tmp', os.path.basename(path)))
        if not os.path.exists(newpath):
            try:
                os.makedirs(os.path.dirname(newpath))
            except:
                pass#traceback.print_exc()
                #Usage()
        y = open(newpath, 'w')
        y.write(page)
        y.close()
        if css:
            csspath = os.path.abspath(os.path.join(
                out,'tmp','%s.css'%stylesheet))
            x = open(csspath,'w')
            x.write(css)
            x.close()
        if show:
            try:
                # os.startfile is Windows-only; failures are only reported.
                os.startfile(newpath)
            except:
                traceback.print_exc()
        return newpath
    else:
        # returnstr: hand back the strings instead of touching the filesystem
        return css, page
+
+##################################################################### helpers
+
def walkdir(dir):
    """Return a list of .py and .pyw files from a given directory.

    This function can be written as a generator Python 2.3, or a genexp
    in Python 2.4. But 2.2 and 2.1 would be left out....
    """
    # Grab every candidate matching *.py* in the directory,
    # then keep only genuine python sources (.py / .pyw).
    candidates = glob.glob(os.path.join(dir, "*.[p][y]*"))
    sources = [name for name in candidates
               if name.endswith('.py') or name.endswith('.pyw')]
    if sources:
        return sources
    # nothing matched: signal the caller with None rather than []
    return None
+
def showpage(path):
    """Helper function to open webpages"""
    # Best effort only: a failure to launch a browser is printed, not raised.
    try:
        import webbrowser
        webbrowser.open_new(os.path.abspath(path))
    except:
        traceback.print_exc()
+
def _printinfo(message, quiet):
    """Helper to print messages"""
    # the quiet flag suppresses all console output
    if not quiet:
        print message
+
def escape(text):
    """escape text for html. similar to cgi.escape"""
    # Ampersand must be handled first, otherwise the entities produced
    # for < and > would themselves be double-escaped.
    for plain, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")):
        text = text.replace(plain, entity)
    return text
+
def unescape(text):
    """Reverse html entity escaping (&quot; &gt; &lt; &amp;) in text."""
    # &amp; is translated last so that e.g. '&amp;lt;' yields '&lt;'
    # rather than '<', mirroring the escape order used above.
    for entity, plain in (("&quot;", '"'), ("&gt;", ">"),
                          ("&lt;", "<"), ("&amp;", "&")):
        text = text.replace(entity, plain)
    return text
+
+########################################################### Custom Exceptions
+
class PySourceColorError(Exception):
    # Base for custom errors; repr/str return the bare message text.
    def __init__(self, msg=''):
        self._msg = msg
        Exception.__init__(self, msg)
    def __repr__(self):
        return self._msg
    __str__ = __repr__
+
class PathError(PySourceColorError):
    # Raised for bad or missing filesystem paths.
    def __init__(self, msg):
        PySourceColorError.__init__(self,
            'Path error! : %s'% msg)
+
class InputError(PySourceColorError):
    # Raised for malformed input (e.g. mismatched <py> tags).
    def __init__(self, msg):
        PySourceColorError.__init__(self,
            'Input error! : %s'% msg)
+
+########################################################## Python code parser
+
class Parser(object):

    """MoinMoin python parser heavily chopped :)

    Tokenizes python source with the stdlib tokenize module and writes
    colorized output (HTML, CSS/HTML or XHTML) to a file-like object.
    The instance itself is the tokenize callback (see __call__).
    """

    def __init__(self, raw, colors=None, title='', out=sys.stdout,
                 markup='html', header=None, footer=None, linenumbers=0):
        """Store the source text & set some flags"""
        if colors == None:
            colors = defaultColors
        self.raw = raw.expandtabs().rstrip()
        self.title = os.path.basename(title)
        self.out = out
        self.line = ''
        self.lasttext = ''
        self.argFlag = 0
        self.classFlag = 0
        self.defFlag = 0
        self.decoratorFlag = 0
        self.external = 0
        self.markup = markup.upper()
        self.colors = colors
        self.header = header
        self.footer = footer
        self.doArgs = 1 # overrides the new tokens
        self.doNames = 1 # overrides the new tokens
        self.doMathOps = 1 # overrides the new tokens
        self.doBrackets = 1 # overrides the new tokens
        self.doURL = 1 # override url conversion
        self.LINENUMHOLDER = "___line___".upper()
        self.LINESTART = "___start___".upper()
        self.skip = 0
        # add space left side of code for padding.Override in color dict.
        self.extraspace = self.colors.get(EXTRASPACE, '')
        # Linenumbers less then zero also have numberlinks
        self.dolinenums = self.linenum = abs(linenumbers)
        if linenumbers < 0:
            self.numberlinks = 1
        else:
            self.numberlinks = 0

    def format(self, form=None):
        """Parse and send the colorized source"""
        # form controls page wrapping:
        #   'snip'/'code' -> bare block, 'embed' -> bare + external css,
        #   'external'    -> full page + external css, else full page.
        if form in ('snip','code'):
            self.addEnds = 0
        elif form == 'embed':
            self.addEnds = 0
            self.external = 1
        else:
            if form == 'external':
                self.external = 1
            self.addEnds = 1

        # Store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0

        # Add linenumbers
        if self.dolinenums:
            start=self.LINENUMHOLDER+' '+self.extraspace
        else:
            start=''+self.extraspace
        newlines = []
        lines = self.raw.splitlines(0)
        for l in lines:
            # span and div escape for customizing and embedding raw text
            if (l.startswith('#$#')
                    or l.startswith('#%#')
                    or l.startswith('#@#')):
                newlines.append(l)
            else:
                # kludge for line spans in css,xhtml
                if self.markup in ['XHTML','CSS']:
                    newlines.append(self.LINESTART+' '+start+l)
                else:
                    newlines.append(start+l)
        self.raw = "\n".join(newlines)+'\n'# plus an extra newline at the end

        # Gather lines
        while 1:
            pos = self.raw.find('\n', pos) + 1
            if not pos: break
            self.lines.append(pos)
        self.lines.append(len(self.raw))

        # Wrap text in a filelike object
        self.pos = 0
        text = StringIO.StringIO(self.raw)

        # Markup start
        if self.addEnds:
            self._doPageStart()
        else:
            self._doSnippetStart()

        ## Tokenize calls the __call__
        ## function for each token till done.
        # Parse the source and write out the results.
        try:
            tokenize.tokenize(text.readline, self)
        except tokenize.TokenError, ex:
            msg = ex[0]
            line = ex[1][0]
            self.out.write("<h3>ERROR: %s</h3>%s\n"%
                            (msg, self.raw[self.lines[line]:]))
            #traceback.print_exc()

        # Markup end
        if self.addEnds:
            self._doPageEnd()
        else:
            self._doSnippetEnd()

    def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
        """Token handler. Order is important do not rearrange."""
        self.line = line
        # Calculate new positions
        oldpos = self.pos
        newpos = self.lines[srow] + scol
        self.pos = newpos + len(toktext)
        # Handle newlines
        if toktype in (token.NEWLINE, tokenize.NL):
            self.decoratorFlag = self.argFlag = 0
            # kludge for line spans in css,xhtml
            if self.markup in ['XHTML','CSS']:
                self.out.write('</span>')
            self.out.write('\n')
            return

        # Send the original whitespace, and tokenize backslashes if present.
        # Tokenizer.py just sends continued line backslashes with whitespace.
        # This is a hack to tokenize continued line slashes as operators.
        # Should continued line backslashes be treated as operators
        # or some other token?

        if newpos > oldpos:
            if self.raw[oldpos:newpos].isspace():
                # consume a single space after linestarts and linenumbers
                # had to have them so tokenizer could seperate them.
                # multiline strings are handled by do_Text functions
                if self.lasttext != self.LINESTART \
                        and self.lasttext != self.LINENUMHOLDER:
                    self.out.write(self.raw[oldpos:newpos])
                else:
                    self.out.write(self.raw[oldpos+1:newpos])
            else:
                slash = self.raw[oldpos:newpos].find('\\')+oldpos
                self.out.write(self.raw[oldpos:slash])
                getattr(self, '_send%sText'%(self.markup))(OPERATOR, '\\')
                self.linenum+=1
                # kludge for line spans in css,xhtml
                if self.markup in ['XHTML','CSS']:
                    self.out.write('</span>')
                self.out.write(self.raw[slash+1:newpos])

        # Skip indenting tokens
        if toktype in (token.INDENT, token.DEDENT):
            self.pos = newpos
            return

        # Look for operators
        if token.LPAR <= toktype and toktype <= token.OP:
            # Trap decorators py2.4 >
            if toktext == '@':
                toktype = DECORATOR
                # Set a flag if this was the decorator start so
                # the decorator name and arguments can be identified
                self.decoratorFlag = self.argFlag = 1
            else:
                if self.doArgs:
                    # Find the start for arguments
                    if toktext == '(' and self.argFlag:
                        self.argFlag = 2
                    # Find the end for arguments
                    elif toktext == ':':
                        self.argFlag = 0
                ## Seperate the diffrent operator types
                # Brackets
                if self.doBrackets and toktext in ['[',']','(',')','{','}']:
                    toktype = BRACKETS
                # Math operators
                elif self.doMathOps and toktext in ['*=','**=','-=','+=','|=',
                                                    '%=','>>=','<<=','=','^=',
                                                    '/=', '+','-','**','*','/','%']:
                    toktype = MATH_OPERATOR
                # Operator
                else:
                    toktype = OPERATOR
                    # example how flags should work.
                    # def fun(arg=argvalue,arg2=argvalue2):
                    # 0    1   2  A        1    N        2
                    if toktext == "=" and self.argFlag == 2:
                        self.argFlag = 1
                    elif toktext == "," and self.argFlag == 1:
                        self.argFlag = 2
        # Look for keywords
        elif toktype == NAME and keyword.iskeyword(toktext):
            toktype = KEYWORD
            # Set a flag if this was the class / def start so
            # the class / def name and arguments can be identified
            if toktext in ['class', 'def']:
                if toktext =='class' and \
                        not line[:line.find('class')].endswith('.'):
                    self.classFlag = self.argFlag = 1
                elif toktext == 'def' and \
                        not line[:line.find('def')].endswith('.'):
                    self.defFlag = self.argFlag = 1
                else:
                    # must have used a keyword as a name i.e. self.class
                    toktype = ERRORTOKEN

        # Look for class, def, decorator name
        elif (self.classFlag or self.defFlag or self.decoratorFlag) \
                and self.doNames:
            if self.classFlag:
                self.classFlag = 0
                toktype = CLASS_NAME
            elif self.defFlag:
                self.defFlag = 0
                toktype = DEF_NAME
            elif self.decoratorFlag:
                self.decoratorFlag = 0
                toktype = DECORATOR_NAME

        # Look for strings
        # Order of evaluation is important do not change.
        elif toktype == token.STRING:
            text = toktext.lower()
            # TRIPLE DOUBLE QUOTE's
            if (text[:3] == '"""'):
                toktype = TRIPLEDOUBLEQUOTE
            elif (text[:4] == 'r"""'):
                toktype = TRIPLEDOUBLEQUOTE_R
            elif (text[:4] == 'u"""' or
                    text[:5] == 'ur"""'):
                toktype = TRIPLEDOUBLEQUOTE_U
            # DOUBLE QUOTE's
            elif (text[:1] == '"'):
                toktype = DOUBLEQUOTE
            elif (text[:2] == 'r"'):
                toktype = DOUBLEQUOTE_R
            elif (text[:2] == 'u"' or
                    text[:3] == 'ur"'):
                toktype = DOUBLEQUOTE_U
            # TRIPLE SINGLE QUOTE's
            elif (text[:3] == "'''"):
                toktype = TRIPLESINGLEQUOTE
            elif (text[:4] == "r'''"):
                toktype = TRIPLESINGLEQUOTE_R
            elif (text[:4] == "u'''" or
                    text[:5] == "ur'''"):
                toktype = TRIPLESINGLEQUOTE_U
            # SINGLE QUOTE's
            elif (text[:1] == "'"):
                toktype = SINGLEQUOTE
            elif (text[:2] == "r'"):
                toktype = SINGLEQUOTE_R
            elif (text[:2] == "u'" or
                    text[:3] == "ur'"):
                toktype = SINGLEQUOTE_U

            # test for invalid string declaration
            if self.lasttext.lower() == 'ru':
                toktype = ERRORTOKEN

        # Look for comments
        elif toktype == COMMENT:
            if toktext[:2] == "##":
                toktype = DOUBLECOMMENT
            elif toktext[:3] == '#$#':
                toktype = TEXT
                self.textFlag = 'SPAN'
                toktext = toktext[3:]
            elif toktext[:3] == '#%#':
                toktype = TEXT
                self.textFlag = 'DIV'
                toktext = toktext[3:]
            elif toktext[:3] == '#@#':
                toktype = TEXT
                self.textFlag = 'RAW'
                toktext = toktext[3:]
            if self.doURL:
                # this is a 'fake helper function'
                # url(URI,Alias_name) or url(URI)
                url_pos = toktext.find('url(')
                if url_pos != -1:
                    before = toktext[:url_pos]
                    url = toktext[url_pos+4:]
                    splitpoint = url.find(',')
                    endpoint = url.find(')')
                    after = url[endpoint+1:]
                    url = url[:endpoint]
                    if splitpoint != -1:
                        urlparts = url.split(',',1)
                        toktext = '%s<a href="%s">%s</a>%s'%(
                            before,urlparts[0],urlparts[1].lstrip(),after)
                    else:
                        toktext = '%s<a href="%s">%s</a>%s'%(before,url,url,after)

        # Seperate errors from decorators
        elif toktype == ERRORTOKEN:
            # Bug fix for < py2.4
            # space between decorators
            if self.argFlag and toktext.isspace():
                #toktype = NAME
                self.out.write(toktext)
                return
            # Bug fix for py2.2 linenumbers with decorators
            elif toktext.isspace():
                # What if we have a decorator after a >>> or ...
                #p = line.find('@')
                #if p >= 0 and not line[:p].isspace():
                    #self.out.write(toktext)
                    #return
                if self.skip:
                    self.skip=0
                    return
                else:
                    self.out.write(toktext)
                    return
            # trap decorators < py2.4
            elif toktext == '@':
                toktype = DECORATOR
                # Set a flag if this was the decorator start so
                # the decorator name and arguments can be identified
                self.decoratorFlag = self.argFlag = 1

        # Seperate args from names
        elif (self.argFlag == 2 and
                toktype == NAME and
                toktext != 'None' and
                self.doArgs):
            toktype = ARGS

        # Look for line numbers
        # The conversion code for them is in the send_text functions.
        if toktext in [self.LINENUMHOLDER,self.LINESTART]:
            toktype = LINENUMBER
            # if we don't have linenumbers set flag
            # to skip the trailing space from linestart
            if toktext == self.LINESTART and not self.dolinenums \
                    or toktext == self.LINENUMHOLDER:
                self.skip=1


        # Skip blank token that made it thru
        ## bugfix for the last empty tag.
        if toktext == '':
            return

        # Last token text history
        self.lasttext = toktext

        # escape all but the urls in the comments
        if toktype in (DOUBLECOMMENT, COMMENT):
            if toktext.find('<a href=') == -1:
                toktext = escape(toktext)
            else:
                pass
        elif toktype == TEXT:
            pass
        else:
            toktext = escape(toktext)

        # Send text for any markup
        getattr(self, '_send%sText'%(self.markup))(toktype, toktext)
        return

    ################################################################# Helpers

    def _doSnippetStart(self):
        # Emit the opening wrapper when no full page is being produced.
        if self.markup == 'HTML':
            # Start of html snippet
            self.out.write('<pre>\n')
        else:
            # Start of css/xhtml snippet
            self.out.write(self.colors.get(CODESTART,'<pre class="py">\n'))

    def _doSnippetEnd(self):
        # End of html snippet
        self.out.write(self.colors.get(CODEEND,'</pre>\n'))

    ######################################################## markup selectors

    def _getFile(self, filepath):
        # Best-effort file slurp; returns '' on any read failure.
        try:
            _file = open(filepath,'r')
            content = _file.read()
            _file.close()
        except:
            traceback.print_exc()
            content = ''
        return content

    def _doPageStart(self):
        # Dispatch to _doHTMLStart / _doCSSStart / _doXHTMLStart.
        getattr(self, '_do%sStart'%(self.markup))()

    def _doPageHeader(self):
        # Header may be literal text (marked with #$#/#%#/#@#) or a filepath.
        if self.header != None:
            # NOTE(review): the second test repeats '#$#'; the footer twin
            # below checks '#@#' — this looks like a typo. Confirm intent.
            if self.header.find('#$#') != -1 or \
                    self.header.find('#$#') != -1 or \
                    self.header.find('#%#') != -1:
                self.out.write(self.header[3:])
        else:
            if self.header != '':
                self.header = self._getFile(self.header)
            getattr(self, '_do%sHeader'%(self.markup))()

    def _doPageFooter(self):
        # Footer may be literal text (marked with #$#/#@#/#%#) or a filepath.
        if self.footer != None:
            if self.footer.find('#$#') != -1 or \
                    self.footer.find('#@#') != -1 or \
                    self.footer.find('#%#') != -1:
                self.out.write(self.footer[3:])
        else:
            if self.footer != '':
                self.footer = self._getFile(self.footer)
            getattr(self, '_do%sFooter'%(self.markup))()

    def _doPageEnd(self):
        # Dispatch to _doHTMLEnd / _doCSSEnd / _doXHTMLEnd.
        getattr(self, '_do%sEnd'%(self.markup))()

    ################################################### color/style retrieval
    ## Some of these are not used anymore but are kept for documentation

    def _getLineNumber(self):
        # Return the current line number right-justified, then advance it.
        num = self.linenum
        self.linenum+=1
        return str(num).rjust(5)+" "

    def _getTags(self, key):
        # style tags
        return self.colors.get(key, self.colors[NAME])[0]

    def _getForeColor(self, key):
        # get text foreground color, if not set to black
        color = self.colors.get(key, self.colors[NAME])[1]
        if color[:1] != '#':
            color = '#000000'
        return color

    def _getBackColor(self, key):
        # get text background color
        return self.colors.get(key, self.colors[NAME])[2]

    def _getPageColor(self):
        # get page background color
        return self.colors.get(PAGEBACKGROUND, '#FFFFFF')

    def _getStyle(self, key):
        # get the token style from the color dictionary
        return self.colors.get(key, self.colors[NAME])

    def _getMarkupClass(self, key):
        # get the markup class name from the markup dictionary
        return MARKUPDICT.get(key, MARKUPDICT[NAME])

    def _getDocumentCreatedBy(self):
        # Generator stamp comment embedded in the output page.
        return '<!--This document created by %s ver.%s on: %s-->\n'%(
            __title__,__version__,time.ctime())

    ################################################### HTML markup functions

    def _doHTMLStart(self):
        # Start of html page
        self.out.write('<!DOCTYPE html PUBLIC \
"-//W3C//DTD HTML 4.01//EN">\n')
        self.out.write('<html><head><title>%s</title>\n'%(self.title))
        self.out.write(self._getDocumentCreatedBy())
        self.out.write('<meta http-equiv="Content-Type" \
content="text/html;charset=iso-8859-1">\n')
        # Get background
        self.out.write('</head><body bgcolor="%s">\n'%self._getPageColor())
        self._doPageHeader()
        self.out.write('<pre>')

    def _getHTMLStyles(self, toktype, toktext):
        # Get styles
        tags, color = self.colors.get(toktype, self.colors[NAME])[:2]#
        tagstart=[]
        tagend=[]
        # check for styles and set them if needed.
        if 'b' in tags:#Bold
            tagstart.append('<b>')
            tagend.append('</b>')
        if 'i' in tags:#Italics
            tagstart.append('<i>')
            tagend.append('</i>')
        if 'u' in tags:#Underline
            tagstart.append('<u>')
            tagend.append('</u>')
        # HTML tags should be paired like so : <b><i><u>Doh!</u></i></b>
        tagend.reverse()
        starttags="".join(tagstart)
        endtags="".join(tagend)
        return starttags,endtags,color

    def _sendHTMLText(self, toktype, toktext):
        numberlinks = self.numberlinks

        # If it is an error, set a red box around the bad tokens
        # older browsers should ignore it
        if toktype == ERRORTOKEN:
            style = ' style="border: solid 1.5pt #FF0000;"'
        else:
            style = ''
        # Get styles
        starttag, endtag, color = self._getHTMLStyles(toktype, toktext)
        # This is a hack to 'fix' multi-line strings.
        # Multi-line strings are treated as only one token
        # even though they can be several physical lines.
        # That makes it hard to spot the start of a line,
        # because at this level all we know about are tokens.

        if toktext.count(self.LINENUMHOLDER):
            # rip apart the string and separate it by line.
            # count lines and change all linenum token to line numbers.
            # embedded all the new font tags inside the current one.
            # Do this by ending the tag first then writing our new tags,
            # then starting another font tag exactly like the first one.
            if toktype == LINENUMBER:
                splittext = toktext.split(self.LINENUMHOLDER)
            else:
                splittext = toktext.split(self.LINENUMHOLDER+' ')
            store = []
            store.append(splittext.pop(0))
            lstarttag, lendtag, lcolor = self._getHTMLStyles(LINENUMBER, toktext)
            count = len(splittext)
            for item in splittext:
                num = self._getLineNumber()
                if numberlinks:
                    numstrip = num.strip()
                    content = '<a name="%s" href="#%s">%s</a>' \
                              %(numstrip,numstrip,num)
                else:
                    content = num
                if count <= 1:
                    endtag,starttag = '',''
                linenumber = ''.join([endtag,'<font color=', lcolor, '>',
                                      lstarttag, content, lendtag, '</font>' ,starttag])
                store.append(linenumber+item)
            toktext = ''.join(store)
        # send text
        ## Output optimization
        # skip font tag if black text, but styles will still be sent. (b,u,i)
        if color !='#000000':
            startfont = '<font color="%s"%s>'%(color, style)
            endfont = '</font>'
        else:
            startfont, endfont = ('','')
        if toktype != LINENUMBER:
            self.out.write(''.join([startfont,starttag,
                                    toktext,endtag,endfont]))
        else:
            self.out.write(toktext)
        return

    def _doHTMLHeader(self):
        # Optional
        if self.header != '':
            self.out.write('%s\n'%self.header)
        else:
            # write a default header with title and timestamp
            color = self._getForeColor(NAME)
            self.out.write('<b><font color="%s"># %s \
 <br># %s</font></b><hr>\n'%
                           (color, self.title, time.ctime()))

    def _doHTMLFooter(self):
        # Optional
        if self.footer != '':
            self.out.write('%s\n'%self.footer)
        else:
            # write a default footer with title and timestamp
            color = self._getForeColor(NAME)
            self.out.write('<b><font color="%s"> \
 <hr># %s<br># %s</font></b>\n'%
                           (color, self.title, time.ctime()))

    def _doHTMLEnd(self):
        # End of html page
        self.out.write('</pre>\n')
        # Write a little info at the bottom
        self._doPageFooter()
        self.out.write('</body></html>\n')

    #################################################### CSS markup functions

    def _getCSSStyle(self, key):
        # Get the tags and colors from the dictionary
        tags, forecolor, backcolor = self._getStyle(key)
        style=[]
        border = None
        bordercolor = None
        tags = tags.lower()
        if tags:
            # get the border color if specified
            # the border color will be appended to
            # the list after we define a border
            if '#' in tags:# border color
                start = tags.find('#')
                end = start + 7
                bordercolor = tags[start:end]
                tags.replace(bordercolor,'',1)
            # text styles
            if 'b' in tags:# Bold
                style.append('font-weight:bold;')
            else:
                style.append('font-weight:normal;')
            if 'i' in tags:# Italic
                style.append('font-style:italic;')
            if 'u' in tags:# Underline
                style.append('text-decoration:underline;')
            # border size
            if 'l' in tags:# thick border
                size='thick'
            elif 'm' in tags:# medium border
                size='medium'
            elif 't' in tags:# thin border
                size='thin'
            else:# default
                size='medium'
            # border styles
            if 'n' in tags:# inset border
                border='inset'
            elif 'o' in tags:# outset border
                border='outset'
            elif 'r' in tags:# ridge border
                border='ridge'
            elif 'g' in tags:# groove border
                border='groove'
            elif '=' in tags:# double border
                border='double'
            elif '.' in tags:# dotted border
                border='dotted'
            elif '-' in tags:# dashed border
                border='dashed'
            elif 's' in tags:# solid border
                border='solid'
            # border type check
            seperate_sides=0
            for side in ['<','>','^','v']:
                if side in tags:
                    seperate_sides+=1
            # border box or seperate sides
            if seperate_sides==0 and border:
                style.append('border: %s %s;'%(border,size))
            else:
                if border == None:
                    border = 'solid'
                if 'v' in tags:# bottom border
                    style.append('border-bottom:%s %s;'%(border,size))
                if '<' in tags:# left border
                    style.append('border-left:%s %s;'%(border,size))
                if '>' in tags:# right border
                    style.append('border-right:%s %s;'%(border,size))
                if '^' in tags:# top border
                    style.append('border-top:%s %s;'%(border,size))
        else:
            style.append('font-weight:normal;')# css inherited style fix
        # we have to define our borders before we set colors
        if bordercolor:
            style.append('border-color:%s;'%bordercolor)
        # text forecolor
        style.append('color:%s;'% forecolor)
        # text backcolor
        if backcolor:
            style.append('background-color:%s;'%backcolor)
        return (self._getMarkupClass(key),' '.join(style))

    def _sendCSSStyle(self, external=0):
        """ create external and internal style sheets"""
        styles = []
        external += self.external
        if not external:
            styles.append('<style type="text/css">\n<!--\n')
        # Get page background color and write styles ignore any we don't know
        styles.append('body { background:%s; }\n'%self._getPageColor())
        # write out the various css styles
        for key in MARKUPDICT:
            styles.append('.%s { %s }\n'%self._getCSSStyle(key))
        # If you want to style the pre tag you must modify the color dict.
        # Example:
        # lite[PY] = .py {border: solid thin #000000;background:#555555}\n'''
        styles.append(self.colors.get(PY, '.py { }\n'))
        # Extra css can be added here
        # add CSSHOOK to the color dict if you need it.
        # Example:
        #lite[CSSHOOK] = """.mytag { border: solid thin #000000; } \n
        #                   .myothertag { font-weight:bold; )\n"""
        styles.append(self.colors.get(CSSHOOK,''))
        if not self.external:
            styles.append('--></style>\n')
        return ''.join(styles)

    def _doCSSStart(self):
        # Start of css/html 4.01 page
        self.out.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">\n')
        self.out.write('<html><head><title>%s</title>\n'%(self.title))
        self.out.write(self._getDocumentCreatedBy())
        self.out.write('<meta http-equiv="Content-Type" \
content="text/html;charset=iso-8859-1">\n')
        self._doCSSStyleSheet()
        self.out.write('</head>\n<body>\n')
        # Write a little info at the top.
        self._doPageHeader()
        self.out.write(self.colors.get(CODESTART,'<pre class="py">\n'))
        return

    def _doCSSStyleSheet(self):
        if not self.external:
            # write an embedded style sheet
            self.out.write(self._sendCSSStyle())
        else:
            # write a link to an external style sheet
            self.out.write('<link rel="stylesheet" \
href="pystyle.css" type="text/css">')
        return

    def _sendCSSText(self, toktype, toktext):
        # This is a hack to 'fix' multi-line strings.
        # Multi-line strings are treated as only one token
        # even though they can be several physical lines.
        # That makes it hard to spot the start of a line,
        # because at this level all we know about are tokens.
        markupclass = MARKUPDICT.get(toktype, MARKUPDICT[NAME])
        # if it is a LINENUMBER type then we can skip the rest
        if toktext == self.LINESTART and toktype == LINENUMBER:
            self.out.write('<span class="py_line">')
            return
        if toktext.count(self.LINENUMHOLDER):
            # rip apart the string and separate it by line
            # count lines and change all linenum token to line numbers
            # also convert linestart and lineend tokens
            # <linestart> <lnumstart> lnum <lnumend> text <lineend>
            #################################################
            newmarkup = MARKUPDICT.get(LINENUMBER, MARKUPDICT[NAME])
            lstartspan = '<span class="%s">'%(newmarkup)
            if toktype == LINENUMBER:
                splittext = toktext.split(self.LINENUMHOLDER)
            else:
                splittext = toktext.split(self.LINENUMHOLDER+' ')
            store = []
            # we have already seen the first linenumber token
            # so we can skip the first one
            store.append(splittext.pop(0))
            for item in splittext:
                num = self._getLineNumber()
                if self.numberlinks:
                    numstrip = num.strip()
                    content= '<a name="%s" href="#%s">%s</a>' \
                             %(numstrip,numstrip,num)
                else:
                    content = num
                linenumber= ''.join([lstartspan,content,'</span>'])
                store.append(linenumber+item)
            toktext = ''.join(store)
        if toktext.count(self.LINESTART):
            # wraps the textline in a line span
            # this adds a lot of kludges, is it really worth it?
            store = []
            parts = toktext.split(self.LINESTART+' ')
            # handle the first part differently
            # the whole token gets wraqpped in a span later on
            first = parts.pop(0)
            # place spans before the newline
            pos = first.rfind('\n')
            if pos != -1:
                first=first[:pos]+'</span></span>'+first[pos:]
            store.append(first)
            #process the rest of the string
            for item in parts:
                #handle line numbers if present
                if self.dolinenums:
                    item = item.replace('</span>',
                                        '</span><span class="%s">'%(markupclass))
                else:
                    item = '<span class="%s">%s'%(markupclass,item)
                # add endings for line and string tokens
                pos = item.rfind('\n')
                if pos != -1:
                    item=item[:pos]+'</span></span>\n'
                store.append(item)
            # add start tags for lines
            toktext = '<span class="py_line">'.join(store)
        # Send text
        if toktype != LINENUMBER:
            if toktype == TEXT and self.textFlag == 'DIV':
                startspan = '<div class="%s">'%(markupclass)
                endspan = '</div>'
            elif toktype == TEXT and self.textFlag == 'RAW':
                startspan,endspan = ('','')
            else:
                startspan = '<span class="%s">'%(markupclass)
                endspan = '</span>'
            self.out.write(''.join([startspan, toktext, endspan]))
        else:
            self.out.write(toktext)
        return

    def _doCSSHeader(self):
        # Optional header; a default title/timestamp block if none was given.
        if self.header != '':
            self.out.write('%s\n'%self.header)
        else:
            name = MARKUPDICT.get(NAME)
            self.out.write('<div class="%s"># %s <br> \
# %s</div><hr>\n'%(name, self.title, time.ctime()))

    def _doCSSFooter(self):
        # Optional
        if self.footer != '':
            self.out.write('%s\n'%self.footer)
        else:
            self.out.write('<hr><div class="%s"># %s <br> \
# %s</div>\n'%(MARKUPDICT.get(NAME),self.title, time.ctime()))

    def _doCSSEnd(self):
        # End of css/html page
        self.out.write(self.colors.get(CODEEND,'</pre>\n'))
        # Write a little info at the bottom
        self._doPageFooter()
        self.out.write('</body></html>\n')
        return

    ################################################## XHTML markup functions

    def _doXHTMLStart(self):
        # XHTML is really just XML + HTML 4.01.
        # We only need to change the page headers,
        # and a few tags to get valid XHTML.
        # Start of xhtml page
        self.out.write('<?xml version="1.0"?>\n \
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\n \
 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n \
<html xmlns="http://www.w3.org/1999/xhtml">\n')
        self.out.write('<head><title>%s</title>\n'%(self.title))
        self.out.write(self._getDocumentCreatedBy())
        self.out.write('<meta http-equiv="Content-Type" \
content="text/html;charset=iso-8859-1"/>\n')
        self._doXHTMLStyleSheet()
        self.out.write('</head>\n<body>\n')
        # Write a little info at the top.
        self._doPageHeader()
        self.out.write(self.colors.get(CODESTART,'<pre class="py">\n'))
        return

    def _doXHTMLStyleSheet(self):
        if not self.external:
            # write an embedded style sheet
            self.out.write(self._sendCSSStyle())
        else:
            # write a link to an external style sheet
            self.out.write('<link rel="stylesheet" \
href="pystyle.css" type="text/css"/>\n')
        return

    def _sendXHTMLText(self, toktype, toktext):
        # XHTML token output is identical to the CSS renderer's.
        self._sendCSSText(toktype, toktext)

    def _doXHTMLHeader(self):
        # Optional
        if self.header:
            self.out.write('%s\n'%self.header)
        else:
            name = MARKUPDICT.get(NAME)
            self.out.write('<div class="%s"># %s <br/> \
# %s</div><hr/>\n '%(
                name, self.title, time.ctime()))

    def _doXHTMLFooter(self):
        # Optional
        if self.footer:
            self.out.write('%s\n'%self.footer)
        else:
            self.out.write('<hr/><div class="%s"># %s <br/> \
# %s</div>\n'%(MARKUPDICT.get(NAME), self.title, time.ctime()))

    def _doXHTMLEnd(self):
        # Same closing sequence as the CSS page.
        self._doCSSEnd()
+
+#############################################################################
+
if __name__ == '__main__':
    # Delegate to the module's command-line interface (defined earlier).
    cli()
+
+#############################################################################
+# PySourceColor.py
+# 2004, 2005 M.E.Farmer Jr.
+# Python license
diff --git a/paste/util/UserDict24.py b/paste/util/UserDict24.py
new file mode 100644
index 0000000..e5b64f5
--- /dev/null
+++ b/paste/util/UserDict24.py
@@ -0,0 +1,167 @@
+"""A more or less complete user-defined wrapper around dictionary objects."""
+
class UserDict:
    """User-defined wrapper around a dictionary object.

    Backport of the Python 2.4 ``UserDict`` so that older interpreters
    get the same API (including ``fromkeys`` and ``pop``).  All real
    storage lives in the plain dict ``self.data``; subclass and
    override methods to customize behavior.
    """
    def __init__(self, dict=None, **kwargs):
        # `dict` may be a mapping or a sequence of key/value pairs.
        self.data = {}
        if dict is not None:
            if not hasattr(dict,'keys'):
                dict = type({})(dict) # make mapping from a sequence
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        # Compare by payload so UserDict == plain dict works both ways.
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key): return self.data[key]
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            # Plain UserDict: a shallow copy of the payload suffices.
            return UserDict(self.data)
        import copy
        # For subclasses: temporarily detach the payload so copy.copy()
        # clones only the shell, then restore it and refill the clone.
        # The ordering here is deliberate; do not "simplify" it.
        data = self.data
        try:
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return self.data.has_key(key)
    def update(self, dict):
        # Accept another UserDict, a real dict, or any mapping that
        # supplies items().
        if isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type(self.data)):
            self.data.update(dict)
        else:
            for k, v in dict.items():
                self[k] = v
    def get(self, key, failobj=None):
        if not self.has_key(key):
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    def fromkeys(cls, iterable, value=None):
        # Alternate constructor: every key maps to `value`.
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    fromkeys = classmethod(fromkeys)
+
class IterableUserDict(UserDict):
    """UserDict variant whose iteration yields keys, matching the
    behavior of a real dictionary."""
    def __iter__(self):
        return self.data.__iter__()
+
class DictMixin:
    """Mixin providing the full dictionary API on top of a minimal one.

    Subclasses must supply ``__getitem__``, ``__setitem__``,
    ``__delitem__`` and ``keys()``; everything else here is derived
    from those four.  Also defining ``__contains__``, ``__iter__`` and
    ``iteritems`` makes the derived methods progressively cheaper.
    (Python 2 code: ``raise X, msg`` syntax and ``.next()`` are used.)
    """
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().

    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP: a failed lookup is the definition of "absent".
        try:
            value = self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)

    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()

    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # At most one fallback value is allowed, mirroring dict.pop().
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other):
        # Make progressively weaker assumptions about "other"
        if hasattr(other, 'iteritems'): # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, '__iter__'): # iter saves memory
            for k in other:
                self[k] = other[k]
        else:
            for k in other.keys():
                self[k] = other[k]
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())

    def __nonzero__(self):
        # NOTE(review): iteritems() returns a generator, and generators
        # are always truthy, so this returns True even for an empty
        # mapping.  Looks like a bug (compare len(self) != 0) -- confirm
        # intended semantics before changing.
        return bool(self.iteritems())
diff --git a/paste/util/__init__.py b/paste/util/__init__.py
new file mode 100644
index 0000000..ea4ff1e
--- /dev/null
+++ b/paste/util/__init__.py
@@ -0,0 +1,4 @@
+"""
+Package for miscellaneous routines that do not depend on other parts
+of Paste
+"""
diff --git a/paste/util/classinit.py b/paste/util/classinit.py
new file mode 100644
index 0000000..e4e6b28
--- /dev/null
+++ b/paste/util/classinit.py
@@ -0,0 +1,42 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
class ClassInitMeta(type):
    """Metaclass that invokes a class's ``__classinit__(cls, new_attrs)``
    hook each time a class (or subclass) using it is created.

    A ``__classinit__`` defined as a plain function in the class body is
    first rebound as a staticmethod so it is not treated as an instance
    method.  (Python 2 only: relies on ``dict.has_key`` and the
    ``im_func`` attribute of unbound methods.)
    """

    def __new__(meta, class_name, bases, new_attrs):
        cls = type.__new__(meta, class_name, bases, new_attrs)
        # Only wrap a __classinit__ defined in *this* class body; an
        # inherited one was already wrapped when its class was built.
        if (new_attrs.has_key('__classinit__')
            and not isinstance(cls.__classinit__, staticmethod)):
            setattr(cls, '__classinit__',
                    staticmethod(cls.__classinit__.im_func))
        # The hook fires for subclasses too, via attribute inheritance.
        if hasattr(cls, '__classinit__'):
            cls.__classinit__(cls, new_attrs)
        return cls
+
def build_properties(cls, new_attrs):
    """
    Given a class and a new set of attributes (as passed in by
    __classinit__), create or modify properties based on functions
    with special names ending in __get, __set, and __del.
    """
    for name, value in new_attrs.items():
        # Only attribute names shaped like accessor functions matter.
        if not (name.endswith('__get') or name.endswith('__set')
                or name.endswith('__del')):
            continue
        base = name[:-5]
        if hasattr(cls, base):
            existing = getattr(cls, base)
            if not isinstance(existing, property):
                raise ValueError(
                    "Attribute %s is a %s, not a property; function %s is named like a property"
                    % (base, type(existing), name))
            # Start from the accessors already installed, so that e.g.
            # foo__get and foo__set merge into a single property.
            accessors = {'fget': existing.fget,
                         'fset': existing.fset,
                         'fdel': existing.fdel,
                         'doc': existing.__doc__}
        else:
            accessors = {}
        # name[-3:] is 'get', 'set' or 'del' -> fget/fset/fdel.
        accessors['f' + name[-3:]] = value
        if name.endswith('__get') and value.__doc__:
            accessors['doc'] = value.__doc__
        setattr(cls, base, property(**accessors))
diff --git a/paste/util/classinstance.py b/paste/util/classinstance.py
new file mode 100644
index 0000000..ac2be3e
--- /dev/null
+++ b/paste/util/classinstance.py
@@ -0,0 +1,38 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
class classinstancemethod(object):
    """
    Acts like a class method when called from a class, like an
    instance method when called by an instance.  The method should
    take two arguments, 'self' and 'cls'; one of these will be None
    depending on how the method was called.
    """

    def __init__(self, func):
        # Wrapped function; its docstring is surfaced on the descriptor.
        self.func = func
        self.__doc__ = func.__doc__

    def __get__(self, obj, type=None):
        # Descriptor protocol: bind both the instance (may be None) and
        # the class so the wrapper can pass whichever is available.
        return _methodwrapper(self.func, obj=obj, type=type)
+
+class _methodwrapper(object):
+
+ def __init__(self, func, obj, type):
+ self.func = func
+ self.obj = obj
+ self.type = type
+
+ def __call__(self, *args, **kw):
+ assert not kw.has_key('self') and not kw.has_key('cls'), (
+ "You cannot use 'self' or 'cls' arguments to a "
+ "classinstancemethod")
+ return self.func(*((self.obj, self.type) + args), **kw)
+
+ def __repr__(self):
+ if self.obj is None:
+ return ('<bound class method %s.%s>'
+ % (self.type.__name__, self.func.func_name))
+ else:
+ return ('<bound method %s.%s of %r>'
+ % (self.type.__name__, self.func.func_name, self.obj))
diff --git a/paste/util/converters.py b/paste/util/converters.py
new file mode 100644
index 0000000..f0ad349
--- /dev/null
+++ b/paste/util/converters.py
@@ -0,0 +1,26 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
def asbool(obj):
    """Coerce *obj* to a bool.

    Strings are matched case-insensitively against the usual truthy
    ('true', 'yes', 'on', 'y', 't', '1') and falsy ('false', 'no',
    'off', 'n', 'f', '0') spellings; anything else raises ValueError.
    Non-strings are simply passed through ``bool()``.
    """
    if not isinstance(obj, (str, unicode)):
        return bool(obj)
    normalized = obj.strip().lower()
    if normalized in ('true', 'yes', 'on', 'y', 't', '1'):
        return True
    if normalized in ('false', 'no', 'off', 'n', 'f', '0'):
        return False
    raise ValueError(
        "String is not true/false: %r" % normalized)
+
def aslist(obj, sep=None, strip=True):
    """Coerce *obj* to a list.

    Strings are split on `sep` (whitespace when None), optionally
    stripping each part; lists/tuples pass through unchanged; None
    becomes []; anything else is wrapped in a one-element list.
    """
    if isinstance(obj, (str, unicode)):
        parts = obj.split(sep)
        if strip:
            parts = [part.strip() for part in parts]
        return parts
    if isinstance(obj, (list, tuple)):
        return obj
    if obj is None:
        return []
    return [obj]
diff --git a/paste/util/dateinterval.py b/paste/util/dateinterval.py
new file mode 100644
index 0000000..2195ab2
--- /dev/null
+++ b/paste/util/dateinterval.py
@@ -0,0 +1,103 @@
+"""
+DateInterval.py
+
+Convert interval strings (in the form of 1w2d, etc) to
+seconds, and back again. Is not exactly about months or
+years (leap years in particular).
+
+Accepts (y)ear, (b)month, (w)eek, (d)ay, (h)our, (m)inute, (s)econd.
+
+Exports only timeEncode and timeDecode functions.
+"""
+
+import re
+
+__all__ = ['interval_decode', 'interval_encode']
+
+second = 1
+minute = second*60
+hour = minute*60
+day = hour*24
+week = day*7
+month = day*30
+year = day*365
+timeValues = {
+ 'y': year,
+ 'b': month,
+ 'w': week,
+ 'd': day,
+ 'h': hour,
+ 'm': minute,
+ 's': second,
+ }
+timeOrdered = timeValues.items()
+timeOrdered.sort(lambda a, b: -cmp(a[1], b[1]))
+
+def interval_encode(seconds, include_sign=False):
+ """Encodes a number of seconds (representing a time interval)
+ into a form like 1h2d3s.
+
+ >>> interval_encode(10)
+ '10s'
+ >>> interval_encode(493939)
+ '5d17h12m19s'
+ """
+ s = ''
+ orig = seconds
+ seconds = abs(seconds)
+ for char, amount in timeOrdered:
+ if seconds >= amount:
+ i, seconds = divmod(seconds, amount)
+ s += '%i%s' % (i, char)
+ if orig < 0:
+ s = '-' + s
+ elif not orig:
+ return '0'
+ elif include_sign:
+ s = '+' + s
+ return s
+
+_timeRE = re.compile(r'[0-9]+[a-zA-Z]')
+def interval_decode(s):
+ """Decodes a number in the format 1h4d3m (1 hour, 3 days, 3 minutes)
+ into a number of seconds
+
+ >>> interval_decode('40s')
+ 40
+ >>> interval_decode('10000s')
+ 10000
+ >>> interval_decode('3d1w45s')
+ 864045
+ """
+ time = 0
+ sign = 1
+ s = s.strip()
+ if s.startswith('-'):
+ s = s[1:]
+ sign = -1
+ elif s.startswith('+'):
+ s = s[1:]
+ for match in allMatches(s, _timeRE):
+ char = match.group(0)[-1].lower()
+ if not timeValues.has_key(char):
+ # @@: should signal error
+ continue
+ time += int(match.group(0)[:-1]) * timeValues[char]
+ return time
+
# @@-sgd 2002-12-23 - this function does not belong in this module, find a better place.
def allMatches(source, regex):
    """Return a list of all non-overlapping matches for `regex` in
    `source`, scanning left to right.

    Equivalent to the old manual search loop (which also carried unused
    locals); ``finditer`` has been available since Python 2.2.
    """
    return list(regex.finditer(source))
+
if __name__ == '__main__':
    # Self-test: run the doctest examples embedded in this module.
    import doctest
    doctest.testmod()
diff --git a/paste/util/datetimeutil.py b/paste/util/datetimeutil.py
new file mode 100644
index 0000000..b1a7f38
--- /dev/null
+++ b/paste/util/datetimeutil.py
@@ -0,0 +1,361 @@
+# (c) 2005 Clark C. Evans and contributors
+# This module is part of the Python Paste Project and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+# Some of this code was funded by: http://prometheusresearch.com
+"""
+Date, Time, and Timespan Parsing Utilities
+
+This module contains parsing support to create "human friendly"
+``datetime`` object parsing. The explicit goal of these routines is
+to provide a multi-format date/time support not unlike that found in
+Microsoft Excel. In most approaches, the input is very "strict" to
+prevent errors -- however, this approach is much more liberal since we
+are assuming the user-interface is parroting back the normalized value
+and thus the user has immediate feedback if the data is not typed in
+correctly.
+
+ ``parse_date`` and ``normalize_date``
+
+    These functions take a value like '9 jan 2007' and return either a
+    ``date`` object, or an ISO 8601 formatted date value such
+    as '2007-01-09'.  There is an option to provide an Oracle database
+ style output as well, ``09 JAN 2007``, but this is not the default.
+
+ This module always treats '/' delimiters as using US date order
+ (since the author's clients are US based), hence '1/9/2007' is
+ January 9th. Since this module treats the '-' as following
+ European order this supports both modes of data-entry; together
+ with immediate parroting back the result to the screen, the author
+    has found this approach to work well in practice.
+
+ ``parse_time`` and ``normalize_time``
+
+    These functions take a value like '1 pm' and return either a
+    ``time`` object, or an ISO 8601 formatted 24h clock time
+ such as '13:00'. There is an option to provide for US style time
+ values, '1:00 PM', however this is not the default.
+
+ ``parse_datetime`` and ``normalize_datetime``
+
+    These functions take a value like '9 jan 2007 at 1 pm' and return
+    either a ``datetime`` object, or an ISO 8601 formatted
+ return (without the T) such as '2007-01-09 13:00'. There is an
+ option to provide for Oracle / US style, '09 JAN 2007 @ 1:00 PM',
+ however this is not the default.
+
+ ``parse_delta`` and ``normalize_delta``
+
+    These functions take a value like '1h 15m' and return either a
+    ``timedelta`` object, or a 2-decimal fixed-point
+ numerical value in hours, such as '1.25'. The rationale is to
+ support meeting or time-billing lengths, not to be an accurate
+    representation in milliseconds.  As such not all valid
+ ``timedelta`` values will have a normalized representation.
+
+"""
+from datetime import timedelta, time, date
+from time import localtime
+import string
+
+__all__ = ['parse_timedelta', 'normalize_timedelta',
+ 'parse_time', 'normalize_time',
+ 'parse_date', 'normalize_date']
+
+def _number(val):
+ try:
+ return string.atoi(val)
+ except:
+ return None
+
+#
+# timedelta
+#
def parse_timedelta(val):
    """
    returns a ``timedelta`` object, or None

    Accepts decimal hours ('1.5'), 'hh:mm', and unit-word forms such
    as '1h 15m' or '1 hour 15 minutes'.  (Python 2 code: relies on
    the function forms in the ``string`` module.)
    """
    if not val:
        return None
    val = string.lower(val)
    if "." in val:
        # Decimal-hours form, e.g. '1.5' -> 1h30m.
        val = float(val)
        return timedelta(hours=int(val), minutes=60*(val % 1.0))
    # Remember which units were mentioned before stripping the words.
    fHour = ("h" in val or ":" in val)
    fMin = ("m" in val or ":" in val)
    # NOTE(review): fFraction is always False here (the "." case
    # returned above) and is never used -- dead code.
    fFraction = "." in val
    # Every character of "minu:teshour()" is treated as noise and
    # blanked out, leaving only the digit groups.
    for noise in "minu:teshour()":
        val = string.replace(val, noise, ' ')
    val = string.strip(val)
    val = string.split(val)
    hr = 0.0
    mi = 0
    # Tokens are popped from the end of the reversed list, i.e. taken
    # in their original left-to-right order.
    val.reverse()
    if fHour:
        hr = int(val.pop())
    if fMin:
        mi = int(val.pop())
    if len(val) > 0 and not hr:
        hr = int(val.pop())
    return timedelta(hours=hr, minutes=mi)
+
def normalize_timedelta(val):
    """Produce the normalized string form of a timedelta.

    The span is rendered as fractional hours with two decimals, so
    '1h 15min' becomes '1.25'.  String input is first run through
    ``parse_timedelta``; a falsy value yields ''.
    """
    if type(val) == str:
        val = parse_timedelta(val)
    if not val:
        return ''
    total = val.seconds
    whole_hours = total/3600
    minutes = (total % 3600)/60
    # Express the leftover minutes as hundredths of an hour (00-99).
    return "%d.%02d" % (whole_hours, minutes * 100/60)
+
+#
+# time
+#
def parse_time(val):
    """Parse a liberal time string ('1 pm', 'noon', 'now', '1330',
    '9:30', ...) into a ``datetime.time``; returns None for empty
    input.  Bare hours below 7 with no AM/PM marker are assumed to be
    PM (so '5' means 17:00).  (Python 2 code: uses the function forms
    in the ``string`` module.)
    """
    if not val:
        return None
    hr = mi = 0
    val = string.lower(val)
    amflag = (-1 != string.find(val, 'a')) # set if AM is found
    pmflag = (-1 != string.find(val, 'p')) # set if PM is found
    # Blank out separators and the am/pm letters, leaving digit groups.
    for noise in ":amp.":
        val = string.replace(val, noise, ' ')
    val = string.split(val)
    if len(val) > 1:
        # Two tokens: hour and minute, e.g. '9 30' from '9:30'.
        hr = int(val[0])
        mi = int(val[1])
    else:
        val = val[0]
        if len(val) < 1:
            pass
        elif 'now' == val:
            tm = localtime()
            hr = tm[3]
            mi = tm[4]
        elif 'noon' == val:
            hr = 12
        elif len(val) < 3:
            hr = int(val)
            # Bare small hour without am/pm: assume afternoon.
            if not amflag and not pmflag and hr < 7:
                hr += 12
        elif len(val) < 5:
            # 3-4 digits: hhmm with a 1- or 2-digit hour.
            hr = int(val[:-2])
            mi = int(val[-2:])
        else:
            # NOTE(review): this takes only the FIRST character of a
            # 5+ digit string ('12345' -> hour 1); val[:-2] looks like
            # the intent -- confirm before changing.
            hr = int(val[:1])
    # Fold the parsed hour onto the 24h clock per the am/pm markers.
    if amflag and hr >= 12:
        hr = hr - 12
    if pmflag and hr < 12:
        hr = hr + 12
    return time(hr, mi)
+
def normalize_time(value, ampm):
    """Format a ``time`` (or parseable time string) as 'HH:MM', or as
    US-style 'HH:MM AM/PM' when `ampm` is true.  Falsy input yields
    ''."""
    if not value:
        return ''
    if type(value) == str:
        value = parse_time(value)
    if not ampm:
        return "%02d:%02d" % (value.hour, value.minute)
    hr = value.hour
    suffix = "AM"
    if hr < 1 or hr > 23:
        # Midnight (0 or 24) displays as 12 AM.
        hr = 12
    elif hr >= 12:
        suffix = "PM"
        if hr > 12:
            hr = hr - 12
    return "%02d:%02d %s" % (hr, value.minute, suffix)
+
+#
+# Date Processing
+#
+
+_one_day = timedelta(days=1)
+
+_str2num = {'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6,
+ 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12 }
+
+def _month(val):
+ for (key, mon) in _str2num.items():
+ if key in val:
+ return mon
+ raise TypeError("unknown month '%s'" % val)
+
+_days_in_month = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
+ 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31,
+ }
+_num2str = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
+ 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec',
+ }
+_wkdy = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
+
def parse_date(val):
    """Parse a liberal date string into a ``datetime.date``.

    Accepts ISO 'YYYY-MM-DD', 'now'/'today', weekday names (meaning
    the next such weekday), optional +N/-N day offsets after those
    keywords, '/'-separated US-order dates, month names, and bare
    numeric forms like '20010623', '1202' or '23'.  Missing fields
    default to the current year/month/day.  Returns None for empty
    input; raises TypeError for unparseable values.  (Python 2 code:
    uses the function forms in the ``string`` module.)
    """
    if not(val):
        return None
    val = string.lower(val)
    now = None

    # optimized check for YYYY-MM-DD
    strict = val.split("-")
    if len(strict) == 3:
        (y, m, d) = strict
        # Drop any '+N' offset or trailing time portion from the day.
        if "+" in d:
            d = d.split("+")[0]
        if " " in d:
            d = d.split(" ")[0]
        try:
            now = date(int(y), int(m), int(d))
            # Consume the 10 chars of the ISO date but keep the tail so
            # day offsets below still apply.
            val = "xxx" + val[10:]
        except ValueError:
            pass

    # allow for 'now', 'mon', 'tue', etc.
    if not now:
        chk = val[:3]
        if chk in ('now','tod'):
            now = date.today()
        elif chk in _wkdy:
            # Scan forward to the next occurrence of the named weekday.
            now = date.today()
            idx = list(_wkdy).index(chk) + 1
            while now.isoweekday() != idx:
                now += _one_day

    # allow dates to be modified via + or - /w number of days, so
    # that now+3 is three days from now
    if now:
        tail = val[3:].strip()
        tail = tail.replace("+"," +").replace("-"," -")
        for item in tail.split():
            try:
                days = int(item)
            except ValueError:
                pass
            else:
                now += timedelta(days=days)
        return now

    # ok, standard parsing
    yr = mo = dy = None
    for noise in ('/', '-', ',', '*'):
        val = string.replace(val, noise, ' ')
    for noise in _wkdy:
        val = string.replace(val, noise, ' ')
    out = []
    last = False
    ldig = False
    # Insert spaces at digit/letter boundaries so '23jun' -> '23 jun'.
    for ch in val:
        if ch.isdigit():
            if last and not ldig:
               out.append(' ')
            last = ldig = True
        else:
            if ldig:
                out.append(' ')
                ldig = False
            last = True
        out.append(ch)
    val = string.split("".join(out))
    if 3 == len(val):
        # Three tokens: year/month/day in one of several orders.
        a = _number(val[0])
        b = _number(val[1])
        c = _number(val[2])
        if len(val[0]) == 4:
            yr = a
            if b: # 1999 6 23
                mo = b
                dy = c
            else: # 1999 Jun 23
                mo = _month(val[1])
                dy = c
        elif a > 0:
            yr = c
            if len(val[2]) < 4:
                raise TypeError("four digit year required")
            if b: # 6 23 1999
                dy = b
                mo = a
            else: # 23 Jun 1999
                dy = a
                mo = _month(val[1])
        else: # Jun 23, 2000
            dy = b
            yr = c
            if len(val[2]) < 4:
                raise TypeError("four digit year required")
            mo = _month(val[0])
    elif 2 == len(val):
        # Two tokens: year+month, month+day, or day+month.
        a = _number(val[0])
        b = _number(val[1])
        if a > 999:
            yr = a
            dy = 1
            if b > 0: # 1999 6
                mo = b
            else: # 1999 Jun
                mo = _month(val[1])
        elif a > 0:
            if b > 999: # 6 1999
                mo = a
                yr = b
                dy = 1
            elif b > 0: # 6 23
                mo = a
                dy = b
            else: # 23 Jun
                dy = a
                mo = _month(val[1])
        else:
            if b > 999: # Jun 2001
                yr = b
                dy = 1
            else: # Jun 23
                dy = b
            mo = _month(val[0])
    elif 1 == len(val):
        # A single token: month name, packed digits, or a bare day.
        val = val[0]
        if not val.isdigit():
            mo = _month(val)
            if mo is not None:
                dy = 1
        else:
            v = _number(val)
            val = str(v)
            if 8 == len(val): # 20010623
                yr = _number(val[:4])
                mo = _number(val[4:6])
                dy = _number(val[6:])
            elif len(val) in (3,4):
                if v > 1300: # 2004
                    yr = v
                    mo = 1
                    dy = 1
                else: # 1202
                    mo = _number(val[:-2])
                    dy = _number(val[-2:])
            elif v < 32:
                dy = v
            else:
                raise TypeError("four digit year required")
    # Fill any missing fields from today's date.
    tm = localtime()
    if mo is None:
        mo = tm[1]
    if dy is None:
        dy = tm[2]
    if yr is None:
        yr = tm[0]
    return date(yr, mo, dy)
+
def normalize_date(val, iso8601=True):
    """Render a date (or parseable date string) as ISO 8601
    'YYYY-MM-DD', or as Oracle-style 'DD Mon YYYY' when `iso8601` is
    false.  Falsy input yields ''."""
    if not val:
        return ''
    if type(val) == str:
        val = parse_date(val)
    if not iso8601:
        return "%02d %s %4d" % (val.day, _num2str[val.month], val.year)
    return "%4d-%02d-%02d" % (val.year, val.month, val.day)
diff --git a/paste/util/doctest24.py b/paste/util/doctest24.py
new file mode 100644
index 0000000..28849ed
--- /dev/null
+++ b/paste/util/doctest24.py
@@ -0,0 +1,2665 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+# Jim Fulton
+# Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False". In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests. There are also many ways to override parts
+of doctest's default behaviors. See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+ # 0, Option Flags
+ 'register_optionflag',
+ 'DONT_ACCEPT_TRUE_FOR_1',
+ 'DONT_ACCEPT_BLANKLINE',
+ 'NORMALIZE_WHITESPACE',
+ 'ELLIPSIS',
+ 'IGNORE_EXCEPTION_DETAIL',
+ 'COMPARISON_FLAGS',
+ 'REPORT_UDIFF',
+ 'REPORT_CDIFF',
+ 'REPORT_NDIFF',
+ 'REPORT_ONLY_FIRST_FAILURE',
+ 'REPORTING_FLAGS',
+ # 1. Utility Functions
+ 'is_private',
+ # 2. Example & DocTest
+ 'Example',
+ 'DocTest',
+ # 3. Doctest Parser
+ 'DocTestParser',
+ # 4. Doctest Finder
+ 'DocTestFinder',
+ # 5. Doctest Runner
+ 'DocTestRunner',
+ 'OutputChecker',
+ 'DocTestFailure',
+ 'UnexpectedException',
+ 'DebugRunner',
+ # 6. Test Functions
+ 'testmod',
+ 'testfile',
+ 'run_docstring_examples',
+ # 7. Tester
+ 'Tester',
+ # 8. Unittest Support
+ 'DocTestSuite',
+ 'DocFileSuite',
+ 'set_unittest_reportflags',
+ # 9. Debugging Support
+ 'script_from_examples',
+ 'testsource',
+ 'debug_src',
+ 'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re, types
+import unittest, difflib, pdb, tempfile
+import warnings
+from StringIO import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+ __name__, 0)
+
+# There are 4 basic classes:
+# - Example: a <source, want> pair, plus an intra-docstring line number.
+# - DocTest: a collection of examples, parsed from a docstring, plus
+# info about where the docstring came from (name, filename, lineno).
+# - DocTestFinder: extracts DocTests from a given object's docstring and
+# its contained objects' docstrings.
+# - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+# list of:
+# +------+ +---------+ +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+ +---------+ +-------+
+# | Example |
+# | ... |
+# | Example |
+# +---------+
+
+# Option constants.
+
# Registry of all option flags, by name.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
    """Allocate the next unused bit, record it under `name`, and
    return it."""
    bit = 2 ** len(OPTIONFLAGS_BY_NAME)
    OPTIONFLAGS_BY_NAME[name] = bit
    return bit
+
# Comparison flags: change how expected vs. actual output is matched.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    IGNORE_EXCEPTION_DETAIL)

# Reporting flags: change how failures are displayed.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')

REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
+
+######################################################################
+## Table of Contents
+######################################################################
+# 1. Utility Functions
+# 2. Example & DocTest -- store test cases
+# 3. DocTest Parser -- extracts examples from strings
+# 4. DocTest Finder -- extracts test cases from objects
+# 5. DocTest Runner -- runs test cases
+# 6. Test Functions -- convenient wrappers for testing
+# 7. Tester Class -- for backwards compatibility
+# 8. Unittest Support
+# 9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
def is_private(prefix, base):
    """prefix, base -> true iff name prefix + "." + base is "private".

    Prefix may be an empty string, and base does not contain a period.
    Prefix is ignored (although functions you write conforming to this
    protocol may make use of it).
    Return true iff base begins with an (at least one) underscore, but
    does not both begin and end with (at least) two underscores.

    >>> is_private("a.b", "my_func")
    False
    >>> is_private("____", "_my_func")
    True
    >>> is_private("someclass", "__init__")
    False
    >>> is_private("sometypo", "__init_")
    True
    >>> is_private("x.y.z", "_")
    True
    >>> is_private("_x.y.z", "__")
    False
    >>> is_private("", "") # senseless but consistent
    False
    """
    warnings.warn("is_private is deprecated; it wasn't useful; "
                  "examine DocTestFinder.find() lists instead",
                  DeprecationWarning, stacklevel=2)
    # Private: at least one leading underscore...
    if not base.startswith("_"):
        return False
    # ...but dunder names (__x__) are public.
    return not (base.startswith("__") and base.endswith("__"))
+
+def _extract_future_flags(globs):
+ """
+ Return the compiler-flags associated with the future features that
+ have been imported into the given namespace (globs).
+ """
+ flags = 0
+ for fname in __future__.all_feature_names:
+ feature = globs.get(fname, None)
+ if feature is getattr(__future__, fname):
+ flags |= feature.compiler_flag
+ return flags
+
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`.  In particular:
      - If `module` is a module, then return module.
      - If `module` is a string, then import and return the
        module with that name.
      - If `module` is None, then return the calling module.
        The calling module is assumed to be the module of
        the stack frame at the given depth in the call stack.
    """
    if inspect.ismodule(module):
        return module
    # NOTE: `unicode` is the Python 2 string type; this branch assumes
    # a Python 2 interpreter.
    elif isinstance(module, (str, unicode)):
        return __import__(module, globals(), locals(), ["*"])
    elif module is None:
        # Walk `depth` frames up the stack to find the caller's module.
        return sys.modules[sys._getframe(depth).f_globals['__name__']]
    else:
        raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+ """
+ Add the given number of space characters to the beginning every
+ non-blank line in `s`, and return the result.
+ """
+ # This regexp matches the start of non-blank lines:
+ return re.sub('(?m)^(?!$)', indent*' ', s)
+
+def _exception_traceback(exc_info):
+ """
+ Return a string containing a traceback message for the given
+ exc_info tuple (as returned by sys.exc_info()).
+ """
+ # Get a traceback message.
+ excout = StringIO()
+ exc_type, exc_val, exc_tb = exc_info
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+ return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+ def getvalue(self):
+ result = StringIO.getvalue(self)
+ # If anything at all was written, make sure there's a trailing
+ # newline. There's no way for the expected output to indicate
+ # that a trailing newline is missing.
+ if result and not result.endswith("\n"):
+ result += "\n"
+ # Prevent softspace from screwing up the next test case, in
+ # case they used print with a trailing comma in an example.
+ if hasattr(self, "softspace"):
+ del self.softspace
+ return result
+
+ def truncate(self, size=None):
+ StringIO.truncate(self, size)
+ if hasattr(self, "softspace"):
+ del self.softspace
+
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """Return True iff `got` matches the pattern `want`, where '...'
    (ELLIPSIS_MARKER) matches any substring, including the empty one.
    Runs in worst-case linear time by taking the leftmost match for
    every literal piece.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2

    # Deal with exact matches possibly needed at one or both ends.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False

    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False

    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)

    return True
+
+def _comment_line(line):
+ "Return a commented form of the given line"
+ line = line.rstrip()
+ if line:
+ return '# '+line
+ else:
+ return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ pdb.Pdb.__init__(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+ if not inspect.ismodule(module):
+ raise TypeError, 'Expected a module: %r' % module
+ if path.startswith('/'):
+ raise ValueError, 'Module-relative files may not have absolute paths'
+
+ # Find the base directory for the path.
+ if hasattr(module, '__file__'):
+ # A normal module/package
+ basedir = os.path.split(module.__file__)[0]
+ elif module.__name__ == '__main__':
+ # An interactive session.
+ if len(sys.argv)>0 and sys.argv[0] != '':
+ basedir = os.path.split(sys.argv[0])[0]
+ else:
+ basedir = os.curdir
+ else:
+ # A module w/o __file__ (this includes builtins)
+ raise ValueError("Can't resolve paths relative to the module " +
+ module + " (it has no __file__)")
+
+ # Combine the base directory and the path.
+ return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+## fragment of source code, and "want" is the expected output for
+## "source." The Example class also includes information about
+## where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+## a string (such as an object's docstring). The DocTest class also
+## includes information about where the string was extracted from.
+
+class Example:
+    """
+    A single doctest example, consisting of source code and expected
+    output.  `Example` defines the following attributes:
+
+      - source: A single Python statement, always ending with a newline.
+        The constructor adds a newline if needed.
+
+      - want: The expected output from running the source code (either
+        from stdout, or a traceback in case of exception).  `want` ends
+        with a newline unless it's empty, in which case it's an empty
+        string.  The constructor adds a newline if needed.
+
+      - exc_msg: The exception message generated by the example, if
+        the example is expected to generate an exception; or `None` if
+        it is not expected to generate an exception.  This exception
+        message is compared against the return value of
+        `traceback.format_exception_only()`.  `exc_msg` ends with a
+        newline unless it's `None`.  The constructor adds a newline
+        if needed.
+
+      - lineno: The line number within the DocTest string containing
+        this Example where the Example begins.  This line number is
+        zero-based, with respect to the beginning of the DocTest.
+
+      - indent: The example's indentation in the DocTest string.
+        I.e., the number of space characters that precede the
+        example's first prompt.
+
+      - options: A dictionary mapping from option flags to True or
+        False, which is used to override default options for this
+        example.  Any option flags not contained in this dictionary
+        are left at their default value (as specified by the
+        DocTestRunner's optionflags).  By default, no options are set.
+    """
+    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+                 options=None):
+        # Normalize inputs: guarantee the newline invariants documented
+        # in the class docstring, so downstream code need not re-check.
+        if not source.endswith('\n'):
+            source += '\n'
+        if want and not want.endswith('\n'):
+            want += '\n'
+        if exc_msg is not None and not exc_msg.endswith('\n'):
+            exc_msg += '\n'
+        # Store properties.
+        self.source = source
+        self.want = want
+        self.lineno = lineno
+        self.indent = indent
+        # Avoid a shared mutable default by creating the dict per instance.
+        if options is None: options = {}
+        self.options = options
+        self.exc_msg = exc_msg
+
+class DocTest:
+    """
+    A collection of doctest examples that should be run in a single
+    namespace.  Each `DocTest` defines the following attributes:
+
+      - examples: the list of examples.
+
+      - globs: The namespace (aka globals) that the examples should
+        be run in.
+
+      - name: A name identifying the DocTest (typically, the name of
+        the object whose docstring this DocTest was extracted from).
+
+      - filename: The name of the file that this DocTest was extracted
+        from, or `None` if the filename is unknown.
+
+      - lineno: The line number within filename where this DocTest
+        begins, or `None` if the line number is unavailable.  This
+        line number is zero-based, with respect to the beginning of
+        the file.
+
+      - docstring: The string that the examples were extracted from,
+        or `None` if the string is unavailable.
+    """
+    def __init__(self, examples, globs, name, filename, lineno, docstring):
+        """
+        Create a new DocTest containing the given examples.  The
+        DocTest's globals are initialized with a copy of `globs`.
+        """
+        assert not isinstance(examples, basestring), \
+               "DocTest no longer accepts str; use DocTestParser instead"
+        self.examples = examples
+        self.docstring = docstring
+        # Copy so examples can mutate their namespace without touching
+        # the caller's dict.
+        self.globs = globs.copy()
+        self.name = name
+        self.filename = filename
+        self.lineno = lineno
+
+    def __repr__(self):
+        if len(self.examples) == 0:
+            examples = 'no examples'
+        elif len(self.examples) == 1:
+            examples = '1 example'
+        else:
+            examples = '%d examples' % len(self.examples)
+        return ('<DocTest %s from %s:%s (%s)>' %
+                (self.name, self.filename, self.lineno, examples))
+
+
+    # This lets us sort tests by name:
+    def __cmp__(self, other):
+        # Non-DocTests always sort before DocTests.
+        if not isinstance(other, DocTest):
+            return -1
+        # id() is a final tiebreaker so distinct tests with identical
+        # (name, filename, lineno) still compare deterministically.
+        return cmp((self.name, self.filename, self.lineno, id(self)),
+                   (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+    """
+    A class used to parse strings containing doctest examples.
+    """
+    # This regular expression is used to find doctest examples in a
+    # string.  It defines three groups: `source` is the source code
+    # (including leading indentation and prompts); `indent` is the
+    # indentation of the first (PS1) line of the source code; and
+    # `want` is the expected output (including leading indentation).
+    _EXAMPLE_RE = re.compile(r'''
+        # Source consists of a PS1 line followed by zero or more PS2 lines.
+        (?P<source>
+            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
+            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
+        \n?
+        # Want consists of any non-blank lines that do not start with PS1.
+        (?P<want> (?:(?![ ]*$)    # Not a blank line
+                     (?![ ]*>>>)  # Not a line starting with PS1
+                     .*$\n?       # But any other line
+                  )*)
+        ''', re.MULTILINE | re.VERBOSE)
+
+    # A regular expression for handling `want` strings that contain
+    # expected exceptions.  It divides `want` into three pieces:
+    #    - the traceback header line (`hdr`)
+    #    - the traceback stack (`stack`)
+    #    - the exception message (`msg`), as generated by
+    #      traceback.format_exception_only()
+    # `msg` may have multiple lines.  We assume/require that the
+    # exception message is the first non-indented line starting with a word
+    # character following the traceback header line.
+    _EXCEPTION_RE = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                # toss trailing whitespace on the header.
+        (?P<stack> .*?)      # don't blink: absorb stuff until...
+        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+    # A callable returning a true value iff its argument is a blank line
+    # or contains a single comment.
+    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+    def parse(self, string, name='<string>'):
+        """
+        Divide the given string into examples and intervening text,
+        and return them as a list of alternating Examples and strings.
+        Line numbers for the Examples are 0-based.  The optional
+        argument `name` is a name identifying this string, and is only
+        used for error messages.
+        """
+        string = string.expandtabs()
+        # If all lines begin with the same indentation, then strip it.
+        min_indent = self._min_indent(string)
+        if min_indent > 0:
+            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+        output = []
+        charno, lineno = 0, 0
+        # Find all doctest examples in the string:
+        for m in self._EXAMPLE_RE.finditer(string):
+            # Add the pre-example text to `output`.
+            output.append(string[charno:m.start()])
+            # Update lineno (lines before this example)
+            lineno += string.count('\n', charno, m.start())
+            # Extract info from the regexp match.
+            (source, options, want, exc_msg) = \
+                     self._parse_example(m, name, lineno)
+            # Create an Example, and add it to the list.  Examples that
+            # are only blank lines/comments (option-directive carriers
+            # aside) are skipped.
+            if not self._IS_BLANK_OR_COMMENT(source):
+                output.append( Example(source, want, exc_msg,
+                                    lineno=lineno,
+                                    indent=min_indent+len(m.group('indent')),
+                                    options=options) )
+            # Update lineno (lines inside this example)
+            lineno += string.count('\n', m.start(), m.end())
+            # Update charno.
+            charno = m.end()
+        # Add any remaining post-example text to `output`.
+        output.append(string[charno:])
+        return output
+
+    def get_doctest(self, string, globs, name, filename, lineno):
+        """
+        Extract all doctest examples from the given string, and
+        collect them into a `DocTest` object.
+
+        `globs`, `name`, `filename`, and `lineno` are attributes for
+        the new `DocTest` object.  See the documentation for `DocTest`
+        for more information.
+        """
+        return DocTest(self.get_examples(string, name), globs,
+                       name, filename, lineno, string)
+
+    def get_examples(self, string, name='<string>'):
+        """
+        Extract all doctest examples from the given string, and return
+        them as a list of `Example` objects.  Line numbers are
+        0-based, because it's most common in doctests that nothing
+        interesting appears on the same line as opening triple-quote,
+        and so the first interesting line is called \"line 1\" then.
+
+        The optional argument `name` is a name identifying this
+        string, and is only used for error messages.
+        """
+        return [x for x in self.parse(string, name)
+                if isinstance(x, Example)]
+
+    def _parse_example(self, m, name, lineno):
+        """
+        Given a regular expression match from `_EXAMPLE_RE` (`m`),
+        return a tuple `(source, options, want, exc_msg)`, where
+        `source` is the matched example's source code (with prompts
+        and indentation stripped); `options` is the dict of option
+        overrides found in the source; `want` is the example's
+        expected output (with indentation stripped); and `exc_msg` is
+        the expected exception message, or None.
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        # Get the example's indentation level.
+        indent = len(m.group('indent'))
+
+        # Divide source into lines; check that they're properly
+        # indented; and then strip their indentation & prompts.
+        source_lines = m.group('source').split('\n')
+        self._check_prompt_blank(source_lines, indent, name, lineno)
+        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+        # indent+4 skips the indentation plus the 4-char '>>> '/'... ' prompt.
+        source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+        # Divide want into lines; check that it's properly indented; and
+        # then strip the indentation.  Spaces before the last newline should
+        # be preserved, so plain rstrip() isn't good enough.
+        want = m.group('want')
+        want_lines = want.split('\n')
+        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+            del want_lines[-1]  # forget final newline & spaces after it
+        self._check_prefix(want_lines, ' '*indent, name,
+                           lineno + len(source_lines))
+        want = '\n'.join([wl[indent:] for wl in want_lines])
+
+        # If `want` contains a traceback message, then extract it.
+        m = self._EXCEPTION_RE.match(want)
+        if m:
+            exc_msg = m.group('msg')
+        else:
+            exc_msg = None
+
+        # Extract options from the source.
+        options = self._find_options(source, name, lineno)
+
+        return source, options, want, exc_msg
+
+    # This regular expression looks for option directives in the
+    # source code of an example.  Option directives are comments
+    # starting with "doctest:".  Warning: this may give false
+    # positives for string-literals that contain the string
+    # "#doctest:".  Eliminating these false positives would require
+    # actually parsing the string; but we limit them by ignoring any
+    # line containing "#doctest:" that is *followed* by a quote mark.
+    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+                                      re.MULTILINE)
+
+    def _find_options(self, source, name, lineno):
+        """
+        Return a dictionary containing option overrides extracted from
+        option directives in the given source string.
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        options = {}
+        # (note: with the current regexp, this will match at most once:)
+        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+            option_strings = m.group(1).replace(',', ' ').split()
+            for option in option_strings:
+                # Each directive must be '+NAME' or '-NAME' with NAME a
+                # registered option flag.
+                if (option[0] not in '+-' or
+                    option[1:] not in OPTIONFLAGS_BY_NAME):
+                    raise ValueError('line %r of the doctest for %s '
+                                     'has an invalid option: %r' %
+                                     (lineno+1, name, option))
+                flag = OPTIONFLAGS_BY_NAME[option[1:]]
+                options[flag] = (option[0] == '+')
+        if options and self._IS_BLANK_OR_COMMENT(source):
+            raise ValueError('line %r of the doctest for %s has an option '
+                             'directive on a line with no example: %r' %
+                             (lineno, name, source))
+        return options
+
+    # This regular expression finds the indentation of every non-blank
+    # line in a string.
+    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+    def _min_indent(self, s):
+        "Return the minimum indentation of any non-blank line in `s`"
+        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+        if len(indents) > 0:
+            return min(indents)
+        else:
+            return 0
+
+    def _check_prompt_blank(self, lines, indent, name, lineno):
+        """
+        Given the lines of a source string (including prompts and
+        leading indentation), check to make sure that every prompt is
+        followed by a space character.  If any line is not followed by
+        a space character, then raise ValueError.
+        """
+        for i, line in enumerate(lines):
+            # line[indent:indent+3] is the '>>>' or '...' prompt; the
+            # character right after it must be a blank.
+            if len(line) >= indent+4 and line[indent+3] != ' ':
+                raise ValueError('line %r of the docstring for %s '
+                                 'lacks blank after %s: %r' %
+                                 (lineno+i+1, name,
+                                  line[indent:indent+3], line))
+
+    def _check_prefix(self, lines, prefix, name, lineno):
+        """
+        Check that every line in the given list starts with the given
+        prefix; if any line does not, then raise a ValueError.
+        """
+        for i, line in enumerate(lines):
+            if line and not line.startswith(prefix):
+                raise ValueError('line %r of the docstring for %s has '
+                                 'inconsistent leading whitespace: %r' %
+                                 (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+    """
+    A class used to extract the DocTests that are relevant to a given
+    object, from its docstring and the docstrings of its contained
+    objects.  Doctests can currently be extracted from the following
+    object types: modules, functions, classes, methods, staticmethods,
+    classmethods, and properties.
+    """
+
+    def __init__(self, verbose=False, parser=DocTestParser(),
+                 recurse=True, _namefilter=None, exclude_empty=True):
+        """
+        Create a new doctest finder.
+
+        The optional argument `parser` specifies a class or
+        function that should be used to create new DocTest objects (or
+        objects that implement the same interface as DocTest).  The
+        signature for this factory function should match the signature
+        of the DocTest constructor.
+
+        If the optional argument `recurse` is false, then `find` will
+        only examine the given object, and not any contained objects.
+
+        If the optional argument `exclude_empty` is false, then `find`
+        will include tests for objects with empty docstrings.
+        """
+        # NOTE(review): the default DocTestParser() instance is created
+        # once and shared by all finders; harmless here since the parser
+        # keeps no per-call mutable state.
+        self._parser = parser
+        self._verbose = verbose
+        self._recurse = recurse
+        self._exclude_empty = exclude_empty
+        # _namefilter is undocumented, and exists only for temporary backward-
+        # compatibility support of testmod's deprecated isprivate mess.
+        self._namefilter = _namefilter
+
+    def find(self, obj, name=None, module=None, globs=None,
+             extraglobs=None):
+        """
+        Return a list of the DocTests that are defined by the given
+        object's docstring, or by any of its contained objects'
+        docstrings.
+
+        The optional parameter `module` is the module that contains
+        the given object.  If the module is not specified or is None, then
+        the test finder will attempt to automatically determine the
+        correct module.  The object's module is used:
+
+            - As a default namespace, if `globs` is not specified.
+            - To prevent the DocTestFinder from extracting DocTests
+              from objects that are imported from other modules.
+            - To find the name of the file containing the object.
+            - To help find the line number of the object within its
+              file.
+
+        Contained objects whose module does not match `module` are ignored.
+
+        If `module` is False, no attempt to find the module will be made.
+        This is obscure, of use mostly in tests:  if `module` is False, or
+        is None but cannot be found automatically, then all objects are
+        considered to belong to the (non-existent) module, so all contained
+        objects will (recursively) be searched for doctests.
+
+        The globals for each DocTest is formed by combining `globs`
+        and `extraglobs` (bindings in `extraglobs` override bindings
+        in `globs`).  A new copy of the globals dictionary is created
+        for each DocTest.  If `globs` is not specified, then it
+        defaults to the module's `__dict__`, if specified, or {}
+        otherwise.  If `extraglobs` is not specified, then it defaults
+        to {}.
+
+        """
+        # If name was not specified, then extract it from the object.
+        if name is None:
+            name = getattr(obj, '__name__', None)
+            if name is None:
+                raise ValueError("DocTestFinder.find: name must be given "
+                                 "when obj.__name__ doesn't exist: %r" %
+                                 (type(obj),))
+
+        # Find the module that contains the given object (if obj is
+        # a module, then module=obj.).  Note: this may fail, in which
+        # case module will be None.
+        if module is False:
+            module = None
+        elif module is None:
+            module = inspect.getmodule(obj)
+
+        # Read the module's source code.  This is used by
+        # DocTestFinder._find_lineno to find the line number for a
+        # given object's docstring.
+        try:
+            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+            source_lines = linecache.getlines(file)
+            if not source_lines:
+                source_lines = None
+        except TypeError:
+            # Built-in objects have no source file; line numbers will
+            # simply be unavailable.
+            source_lines = None
+
+        # Initialize globals, and merge in extraglobs.
+        if globs is None:
+            if module is None:
+                globs = {}
+            else:
+                globs = module.__dict__.copy()
+        else:
+            globs = globs.copy()
+        if extraglobs is not None:
+            globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.
+        tests = []
+        self._find(tests, obj, name, module, source_lines, globs, {})
+        return tests
+
+    def _filter(self, obj, prefix, base):
+        """
+        Return true if the given object should not be examined.
+        """
+        return (self._namefilter is not None and
+                self._namefilter(prefix, base))
+
+    def _from_module(self, module, object):
+        """
+        Return true if the given object is defined in the given
+        module.
+        """
+        if module is None:
+            return True
+        elif inspect.isfunction(object):
+            return module.__dict__ is object.func_globals
+        elif inspect.isclass(object):
+            return module.__name__ == object.__module__
+        elif inspect.getmodule(object) is not None:
+            return module is inspect.getmodule(object)
+        elif hasattr(object, '__module__'):
+            return module.__name__ == object.__module__
+        elif isinstance(object, property):
+            return True # [XX] no way to be sure.
+        else:
+            raise ValueError("object must be a class or function")
+
+    def _find(self, tests, obj, name, module, source_lines, globs, seen):
+        """
+        Find tests for the given object and any contained objects, and
+        add them to `tests`.
+        """
+        if self._verbose:
+            print 'Finding tests in %s' % name
+
+        # If we've already processed this object, then ignore it.
+        # `seen` is keyed by id() to guard against cycles in the
+        # containment graph.
+        if id(obj) in seen:
+            return
+        seen[id(obj)] = 1
+
+        # Find a test for this object, and add it to the list of tests.
+        test = self._get_test(obj, name, module, globs, source_lines)
+        if test is not None:
+            tests.append(test)
+
+        # Look for tests in a module's contained objects.
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                valname = '%s.%s' % (name, valname)
+                # Recurse to functions & classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val)) and
+                    self._from_module(module, val)):
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+        # Look for tests in a module's __test__ dictionary.
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in getattr(obj, '__test__', {}).items():
+                if not isinstance(valname, basestring):
+                    raise ValueError("DocTestFinder.find: __test__ keys "
+                                     "must be strings: %r" %
+                                     (type(valname),))
+                if not (inspect.isfunction(val) or inspect.isclass(val) or
+                        inspect.ismethod(val) or inspect.ismodule(val) or
+                        isinstance(val, basestring)):
+                    raise ValueError("DocTestFinder.find: __test__ values "
+                                     "must be strings, functions, methods, "
+                                     "classes, or modules: %r" %
+                                     (type(val),))
+                valname = '%s.__test__.%s' % (name, valname)
+                self._find(tests, val, valname, module, source_lines,
+                           globs, seen)
+
+        # Look for tests in a class's contained objects.
+        if inspect.isclass(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                # Special handling for staticmethod/classmethod: unwrap
+                # to the underlying function so it can be inspected.
+                if isinstance(val, staticmethod):
+                    val = getattr(obj, valname)
+                if isinstance(val, classmethod):
+                    val = getattr(obj, valname).im_func
+
+                # Recurse to methods, properties, and nested classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val) or
+                      isinstance(val, property)) and
+                      self._from_module(module, val)):
+                    valname = '%s.%s' % (name, valname)
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+    def _get_test(self, obj, name, module, globs, source_lines):
+        """
+        Return a DocTest for the given object, if it defines a docstring;
+        otherwise, return None.
+        """
+        # Extract the object's docstring.  If it doesn't have one,
+        # then return None (no test for this object).
+        if isinstance(obj, basestring):
+            docstring = obj
+        else:
+            try:
+                if obj.__doc__ is None:
+                    docstring = ''
+                else:
+                    docstring = obj.__doc__
+                    if not isinstance(docstring, basestring):
+                        docstring = str(docstring)
+            except (TypeError, AttributeError):
+                docstring = ''
+
+        # Find the docstring's location in the file.
+        lineno = self._find_lineno(obj, source_lines)
+
+        # Don't bother if the docstring is empty.
+        if self._exclude_empty and not docstring:
+            return None
+
+        # Return a DocTest for this object.
+        if module is None:
+            filename = None
+        else:
+            filename = getattr(module, '__file__', module.__name__)
+            # Point at the .py source rather than compiled bytecode.
+            if filename[-4:] in (".pyc", ".pyo"):
+                filename = filename[:-1]
+        return self._parser.get_doctest(docstring, globs, name,
+                                        filename, lineno)
+
+    def _find_lineno(self, obj, source_lines):
+        """
+        Return a line number of the given object's docstring.  Note:
+        this method assumes that the object has a docstring.
+        """
+        lineno = None
+
+        # Find the line number for modules.
+        if inspect.ismodule(obj):
+            lineno = 0
+
+        # Find the line number for classes.
+        # Note: this could be fooled if a class is defined multiple
+        # times in a single file.
+        if inspect.isclass(obj):
+            if source_lines is None:
+                return None
+            pat = re.compile(r'^\s*class\s*%s\b' %
+                             getattr(obj, '__name__', '-'))
+            for i, line in enumerate(source_lines):
+                if pat.match(line):
+                    lineno = i
+                    break
+
+        # Find the line number for functions & methods.
+        if inspect.ismethod(obj): obj = obj.im_func
+        if inspect.isfunction(obj): obj = obj.func_code
+        if inspect.istraceback(obj): obj = obj.tb_frame
+        if inspect.isframe(obj): obj = obj.f_code
+        if inspect.iscode(obj):
+            # co_firstlineno is 1-based; the search below is 0-based.
+            lineno = getattr(obj, 'co_firstlineno', None)-1
+
+        # Find the line number where the docstring starts.  Assume
+        # that it's the first line that begins with a quote mark.
+        # Note: this could be fooled by a multiline function
+        # signature, where a continuation line begins with a quote
+        # mark.
+        if lineno is not None:
+            if source_lines is None:
+                return lineno+1
+            pat = re.compile('(^|.*:)\s*\w*("|\')')
+            for lineno in range(lineno, len(source_lines)):
+                if pat.match(source_lines[lineno]):
+                    return lineno
+
+        # We couldn't find the line number.
+        return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+ """
+ A class used to run DocTest test cases, and accumulate statistics.
+ The `run` method is used to process a single DocTest case. It
+ returns a tuple `(f, t)`, where `t` is the number of test cases
+ tried, and `f` is the number of test cases that failed.
+
+ >>> tests = DocTestFinder().find(_TestClass)
+ >>> runner = DocTestRunner(verbose=False)
+ >>> for test in tests:
+ ... print runner.run(test)
+ (0, 2)
+ (0, 1)
+ (0, 2)
+ (0, 2)
+
+ The `summarize` method prints a summary of all the test cases that
+ have been run by the runner, and returns an aggregated `(f, t)`
+ tuple:
+
+ >>> runner.summarize(verbose=1)
+ 4 items passed all tests:
+ 2 tests in _TestClass
+ 2 tests in _TestClass.__init__
+ 2 tests in _TestClass.get
+ 1 tests in _TestClass.square
+ 7 tests in 4 items.
+ 7 passed and 0 failed.
+ Test passed.
+ (0, 7)
+
+ The aggregated number of tried examples and failed examples is
+ also available via the `tries` and `failures` attributes:
+
+ >>> runner.tries
+ 7
+ >>> runner.failures
+ 0
+
+ The comparison between expected outputs and actual outputs is done
+ by an `OutputChecker`. This comparison may be customized with a
+ number of option flags; see the documentation for `testmod` for
+ more information. If the option flags are insufficient, then the
+ comparison may also be customized by passing a subclass of
+ `OutputChecker` to the constructor.
+
+ The test runner's display output can be controlled in two ways.
+ First, an output function (`out) can be passed to
+ `TestRunner.run`; this function will be called with strings that
+ should be displayed. It defaults to `sys.stdout.write`. If
+ capturing the output is not sufficient, then the display output
+ can be also customized by subclassing DocTestRunner, and
+ overriding the methods `report_start`, `report_success`,
+ `report_unexpected_exception`, and `report_failure`.
+ """
+ # This divider string is used to separate failure messages, and to
+ # separate sections of the summary.
+ DIVIDER = "*" * 70
+
+ def __init__(self, checker=None, verbose=None, optionflags=0):
+ """
+ Create a new test runner.
+
+ Optional keyword arg `checker` is the `OutputChecker` that
+ should be used to compare the expected outputs and actual
+ outputs of doctest examples.
+
+ Optional keyword arg 'verbose' prints lots of stuff if true,
+ only failures if false; by default, it's true iff '-v' is in
+ sys.argv.
+
+ Optional argument `optionflags` can be used to control how the
+ test runner compares expected output to actual output, and how
+ it displays failures. See the documentation for `testmod` for
+ more information.
+ """
+ self._checker = checker or OutputChecker()
+ if verbose is None:
+ verbose = '-v' in sys.argv
+ self._verbose = verbose
+ self.optionflags = optionflags
+ self.original_optionflags = optionflags
+
+ # Keep track of the examples we've run.
+ self.tries = 0
+ self.failures = 0
+ self._name2ft = {}
+
+ # Create a fake output target for capturing doctest output.
+ self._fakeout = _SpoofOut()
+
+ #/////////////////////////////////////////////////////////////////
+ # Reporting methods
+ #/////////////////////////////////////////////////////////////////
+
+ def report_start(self, out, test, example):
+ """
+ Report that the test runner is about to process the given
+ example. (Only displays a message if verbose=True)
+ """
+ if self._verbose:
+ if example.want:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting:\n' + _indent(example.want))
+ else:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting nothing\n')
+
+ def report_success(self, out, test, example, got):
+ """
+ Report that the given example ran successfully. (Only
+ displays a message if verbose=True)
+ """
+ if self._verbose:
+ out("ok\n")
+
+ def report_failure(self, out, test, example, got):
+ """
+ Report that the given example failed.
+ """
+ out(self._failure_header(test, example) +
+ self._checker.output_difference(example, got, self.optionflags))
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ """
+ Report that the given example raised an unexpected exception.
+ """
+ out(self._failure_header(test, example) +
+ 'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+ def _failure_header(self, test, example):
+ out = [self.DIVIDER]
+ if test.filename:
+ if test.lineno is not None and example.lineno is not None:
+ lineno = test.lineno + example.lineno + 1
+ else:
+ lineno = '?'
+ out.append('File "%s", line %s, in %s' %
+ (test.filename, lineno, test.name))
+ else:
+ out.append('Line %s, in %s' % (example.lineno+1, test.name))
+ out.append('Failed example:')
+ source = example.source
+ out.append(_indent(source))
+ return '\n'.join(out)
+
+ #/////////////////////////////////////////////////////////////////
+ # DocTest Running
+ #/////////////////////////////////////////////////////////////////
+
+ def __run(self, test, compileflags, out):
+ """
+ Run the examples in `test`. Write the outcome of each example
+ with one of the `DocTestRunner.report_*` methods, using the
+ writer function `out`. `compileflags` is the set of compiler
+ flags that should be used to execute examples. Return a tuple
+ `(f, t)`, where `t` is the number of examples tried, and `f`
+ is the number of examples that failed. The examples are run
+ in the namespace `test.globs`.
+ """
+ # Keep track of the number of failures and tries.
+ failures = tries = 0
+
+ # Save the option flags (since option directives can be used
+ # to modify them).
+ original_optionflags = self.optionflags
+
+ SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+ check = self._checker.check_output
+
+ # Process each example.
+ for examplenum, example in enumerate(test.examples):
+
+ # If REPORT_ONLY_FIRST_FAILURE is set, then supress
+ # reporting after the first failure.
+ quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+ failures > 0)
+
+ # Merge in the example's options.
+ self.optionflags = original_optionflags
+ if example.options:
+ for (optionflag, val) in example.options.items():
+ if val:
+ self.optionflags |= optionflag
+ else:
+ self.optionflags &= ~optionflag
+
+ # Record that we started this example.
+ tries += 1
+ if not quiet:
+ self.report_start(out, test, example)
+
+ # Use a special filename for compile(), so we can retrieve
+ # the source code during interactive debugging (see
+ # __patched_linecache_getlines).
+ filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+ # Run the example in the given context (globs), and record
+ # any exception that gets raised. (But don't intercept
+ # keyboard interrupts.)
+ try:
+ # Don't blink! This is where the user's code gets run.
+ exec compile(example.source, filename, "single",
+ compileflags, 1) in test.globs
+ self.debugger.set_continue() # ==== Example Finished ====
+ exception = None
+ except KeyboardInterrupt:
+ raise
+ except:
+ exception = sys.exc_info()
+ self.debugger.set_continue() # ==== Example Finished ====
+
+ got = self._fakeout.getvalue() # the actual output
+ self._fakeout.truncate(0)
+ outcome = FAILURE # guilty until proved innocent or insane
+
+ # If the example executed without raising any exceptions,
+ # verify its output.
+ if exception is None:
+ if check(example.want, got, self.optionflags):
+ outcome = SUCCESS
+
+ # The example raised an exception: check if it was expected.
+ else:
+ exc_info = sys.exc_info()
+ exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+ if not quiet:
+ got += _exception_traceback(exc_info)
+
+ # If `example.exc_msg` is None, then we weren't expecting
+ # an exception.
+ if example.exc_msg is None:
+ outcome = BOOM
+
+ # We expected an exception: see whether it matches.
+ elif check(example.exc_msg, exc_msg, self.optionflags):
+ outcome = SUCCESS
+
+ # Another chance if they didn't care about the detail.
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+ m1 = re.match(r'[^:]*:', example.exc_msg)
+ m2 = re.match(r'[^:]*:', exc_msg)
+ if m1 and m2 and check(m1.group(0), m2.group(0),
+ self.optionflags):
+ outcome = SUCCESS
+
+ # Report the outcome.
+ if outcome is SUCCESS:
+ if not quiet:
+ self.report_success(out, test, example, got)
+ elif outcome is FAILURE:
+ if not quiet:
+ self.report_failure(out, test, example, got)
+ failures += 1
+ elif outcome is BOOM:
+ if not quiet:
+ self.report_unexpected_exception(out, test, example,
+ exc_info)
+ failures += 1
+ else:
+ assert False, ("unknown outcome", outcome)
+
+ # Restore the option flags (in case they were modified)
+ self.optionflags = original_optionflags
+
+ # Record and return the number of failures and tries.
+ self.__record_outcome(test, failures, tries)
+ return failures, tries
+
+ def __record_outcome(self, test, f, t):
+ """
+ Record the fact that the given DocTest (`test`) generated `f`
+ failures out of `t` tried examples.
+ """
+ f2, t2 = self._name2ft.get(test.name, (0,0))
+ self._name2ft[test.name] = (f+f2, t+t2)
+ self.failures += f
+ self.tries += t
+
    # Matches the synthetic filenames that __run hands to compile()
    # (e.g. "<doctest some.test.name[3]>"), capturing the test's dotted
    # name and the example's index within that test.
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        """Stand-in for linecache.getlines, installed by run().

        When the debugger asks for one of our synthetic "<doctest ...>"
        filenames belonging to the test currently running, serve the
        example's source lines; anything else is deferred to the real
        linecache.getlines saved in run().
        """
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            # module_globals is deliberately not forwarded: the saved
            # getlines may predate that parameter (it is new in 2.5).
            return self.save_linecache_getlines(filename)#?, module_globals)
+
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        # Remember the test so __patched_linecache_getlines can match
        # synthetic "<doctest ...>" filenames against it.
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        # Capture example output by routing sys.stdout to _fakeout.
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo all three monkeypatches even if __run raised (e.g.
            # a KeyboardInterrupt or a DebugRunner report_* exception).
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
+
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        # Partition the per-name tallies into three buckets.
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        # Per-item detail is only shown in verbose mode...
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print "   ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        # ...but failures are always reported.
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt
+
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold another runner's per-test (failures, tries) tallies into
        # this runner's table, summing the counts for any test name
        # that appears in both.  Note that only the per-name table is
        # merged; self.failures / self.tries are not updated here.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
+
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
+
class DocTestFailure(Exception):
    """Signals, in debugging mode, that a doctest example produced
    output different from what it expected.

    The exception instance has variables:

    - test: the DocTest object being run

    - example: the Example object that failed

    - got: the actual output
    """
    def __init__(self, test, example, got):
        self.got = got
        self.example = example
        self.test = test

    def __str__(self):
        return str(self.test)
+
class UnexpectedException(Exception):
    """Signals, in debugging mode, that a doctest example raised an
    exception it did not expect.

    The exception instance has variables:

    - test: the DocTest object being run

    - example: the Example object that failed

    - exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        self.exc_info = exc_info
        self.example = example
        self.test = test

    def __str__(self):
        return str(self.test)
+
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.

       If an unexpected exception occurs, an UnexpectedException is raised.
       It contains the test, the example, and the original exception:

         >>> runner = DebugRunner(verbose=False)
         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
         ...                                    {}, 'foo', 'foo.py', 0)
         >>> try:
         ...     runner.run(test)
         ... except UnexpectedException, failure:
         ...     pass

         >>> failure.test is test
         True

         >>> failure.example.want
         '42\n'

         >>> exc_info = failure.exc_info
         >>> raise exc_info[0], exc_info[1], exc_info[2]
         Traceback (most recent call last):
         ...
         KeyError

       We wrap the original exception to give the calling application
       access to the test and example information.

       If the output doesn't match, then a DocTestFailure is raised:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 1
         ...      >>> x
         ...      2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> try:
         ...    runner.run(test)
         ... except DocTestFailure, failure:
         ...    pass

       DocTestFailure objects provide access to the test:

         >>> failure.test is test
         True

       As well as to the example:

         >>> failure.example.want
         '2\n'

       and the actual output:

         >>> failure.got
         '1\n'

       If a failure or error occurs, the globals are left intact:

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 1}

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      >>> raise KeyError
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         Traceback (most recent call last):
         ...
         UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 2}

       But the globals are cleared if there is no error:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         (0, 1)

         >>> test.globs
         {}

       """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run with clearing disabled in the base class so the globals
        # survive when a report_* method below raises; clear them
        # ourselves only if the run completed without an exception.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately, surfacing the offending example.
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        # Abort the run immediately, surfacing the mismatched output.
        raise DocTestFailure(test, example, got)
+
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.

# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod (and testfile): each call either installs
# its runner here or merges the runner's tallies into the existing one,
# so doctest.master.summarize() can report across multiple calls.
master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None,
+ raise_on_error=False, exclude_empty=False):
+ """m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+ exclude_empty=False
+
+ Test examples in docstrings in functions and classes reachable
+ from module m (or the current module if m is not supplied), starting
+ with m.__doc__. Unless isprivate is specified, private names
+ are not skipped.
+
+ Also test examples reachable from dict m.__test__ if it exists and is
+ not None. m.__test__ maps names to functions, classes and strings;
+ function and class docstrings are tested even if the name is private;
+ strings are tested directly, as if they were docstrings.
+
+ Return (#failures, #tests).
+
+ See doctest.__doc__ for an overview.
+
+ Optional keyword arg "name" gives the name of the module; by default
+ use m.__name__.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use m.__dict__. A copy of this
+ dict is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used. This is new in 2.4.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. This is new in 2.3. Possible values (see the
+ docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Deprecated in Python 2.4:
+ Optional keyword arg "isprivate" specifies a function used to
+ determine whether a name is private. The default function is
+ treat all functions as public. Optionally, "isprivate" can be
+ set to doctest.is_private to skip over functions marked as private
+ using the underscore naming convention; see its docs for details.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if isprivate is not None:
+ warnings.warn("the isprivate argument is deprecated; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning)
+
+ # If no module was given, then use __main__.
+ if m is None:
+ # DWA - m will still be None if this wasn't invoked from the command
+ # line, in which case the following TypeError is about as good an error
+ # as we should expect
+ m = sys.modules.get('__main__')
+
+ # Check that we were actually given a module.
+ if not inspect.ismodule(m):
+ raise TypeError("testmod: module required; %r" % (m,))
+
+ # If no name was given, then use the module's name.
+ if name is None:
+ name = m.__name__
+
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
+
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    # Read the file, convert it to a test, and run it.  Close the file
    # handle explicitly instead of leaking it until garbage collection
    # (the previous `open(filename).read()` never closed it).
    f = open(filename)
    try:
        s = f.read()
    finally:
        f.close()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
+
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Run any doctest examples in the docstring of object `f`, using
    `globs` as the globals namespace.  `name` is used in failure
    messages.  When `verbose` is true, output is generated even if
    there are no failures.

    `compileflags` gives the set of flags for the Python compiler when
    running the examples; if unspecified it defaults to the
    future-import flags that apply to `globs`.

    `optionflags` selects comparison and reporting options; see the
    documentation for `testmod` for more information.
    """
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    # recurse=False: only f's own docstring is searched, not the
    # docstrings of objects contained in f.
    finder = DocTestFinder(verbose=verbose, recurse=False)
    for one_test in finder.find(f, name, globs=globs):
        runner.run(one_test, compileflags=compileflags)
+
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility. It's not
+# actually used in any way.
+
class Tester:
    # Deprecated pre-2.4 interface, retained only for backward
    # compatibility; it is a thin wrapper over DocTestFinder and
    # DocTestRunner.
    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):

        # Warn immediately: callers should migrate to DocTestRunner.
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs

        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse string `s` as a doctest named `name` and run it;
        # returns (#failures, #tries).
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return (f,t)

    def rundoc(self, object, name=None, module=None):
        # Run every doctest found in `object`, summing the per-test
        # (failures, tries) pairs into one total.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)

    def rundict(self, d, name, module=None):
        # Wrap dict `d` in a synthetic module so rundoc can walk it.
        import new
        m = new.module(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        # Run a __test__-style mapping of names to tests via a
        # synthetic module.
        import new
        m = new.module(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        self.testrunner.merge(other.testrunner)
+
######################################################################
## 8. Unittest Support
######################################################################

# Default reporting flags OR'ed in by DocTestCase.runTest when a case's
# own optionflags contain no reporting bits; updated through
# set_unittest_reportflags().
_unittest_reportflags = 0
+
def set_unittest_reportflags(flags):
    """Set the doctest reporting flags used by the unittest support.

    The old flag is returned so that a runner could restore the old
    value if it wished to:

      >>> old = _unittest_reportflags
      >>> set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True

      >>> import doctest
      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True

    Only reporting flags can be set:

      >>> set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)

      >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags

    # Refuse any bit that falls outside the reporting-flag mask.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
+
+
class DocTestCase(unittest.TestCase):
    # Adapter wrapping a single DocTest in a unittest.TestCase so
    # doctests can run inside any unittest-based harness.

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):

        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags  # doctest option flags for the run
        self._dt_checker = checker          # optional OutputChecker override
        self._dt_test = test                # the DocTest to execute
        self._dt_setUp = setUp              # per-test setup hook (receives the DocTest)
        self._dt_tearDown = tearDown        # per-test teardown hook (receives the DocTest)

    def setUp(self):
        # Invoke the user-supplied setUp hook, if any, with the DocTest.
        test = self._dt_test

        if self._dt_setUp is not None:
            self._dt_setUp(test)

    def tearDown(self):
        # Invoke the user-supplied tearDown hook, then drop the test's
        # globals to help with garbage collection.
        test = self._dt_test

        if self._dt_tearDown is not None:
            self._dt_tearDown(test)

        test.globs.clear()

    def runTest(self):
        # Run the doctest with its output captured in a StringIO;
        # raise failureException with a formatted report if any
        # example failed.
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags

        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags

        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)

        try:
            runner.DIVIDER = "-"*70
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            # runner.run redirects sys.stdout internally; restore it
            # defensively in case the run bailed out part-way.
            sys.stdout = old

        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    def format_failure(self, err):
        # Build the unittest failure message: a header locating the
        # doctest, followed by the runner's captured report `err`.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                '  File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions

           The unit test framework includes a debug method on test cases
           and test suites to support post-mortem debugging.  The test code
           is run in such a way that errors are not caught.  This way a
           caller can catch the errors and initiate post-mortem debugging.

           The DocTestCase provides a debug method that raises
           UnexpectedException errors if there is an unexpected
           exception:

             >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
             ...                {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)
             >>> try:
             ...     case.debug()
             ... except UnexpectedException, failure:
             ...     pass

           The UnexpectedException contains the test, the example, and
           the original exception:

             >>> failure.test is test
             True

             >>> failure.example.want
             '42\n'

             >>> exc_info = failure.exc_info
             >>> raise exc_info[0], exc_info[1], exc_info[2]
             Traceback (most recent call last):
             ...
             KeyError

           If the output doesn't match, then a DocTestFailure is raised:

             >>> test = DocTestParser().get_doctest('''
             ...      >>> x = 1
             ...      >>> x
             ...      2
             ...      ''', {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)

             >>> try:
             ...    case.debug()
             ... except DocTestFailure, failure:
             ...    pass

           DocTestFailure objects provide access to the test:

             >>> failure.test is test
             True

           As well as to the example:

             >>> failure.example.want
             '2\n'

           and the actual output:

             >>> failure.got
             '1\n'

           """

        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()

    def id(self):
        # Identify the case by the doctest's dotted name.
        return self._dt_test.name

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
+
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert the doctests of a module into a unittest test suite.

    Each documentation string in the module that contains doctest
    examples becomes one test case in the returned suite; the case
    fails if any of its examples fail, and the raised exception names
    the file containing the test and a (sometimes approximate) line
    number.

    `module` may be a module object or a module name; if omitted, the
    calling module is used.

    A number of options may be provided as keyword arguments:

    setUp
      A set-up function.  This is called before running the tests in
      each file.  The setUp function will be passed a DocTest object.
      The setUp function can access the test globals as the globs
      attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the tests in
      each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as
      the globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.
    """

    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Fail loudly rather than returning an empty suite: a module
        # with no doctests here usually reveals a collection bug that
        # would otherwise stay hidden.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Point the test at the module's source file, mapping a
            # compiled .pyc/.pyo path back to the .py it came from.
            source_file = module.__file__
            if source_file.endswith(".pyc") or source_file.endswith(".pyo"):
                source_file = source_file[:-1]
            test.filename = source_file
        suite.addTest(DocTestCase(test, **options))

    return suite
+
class DocFileCase(DocTestCase):
    """A DocTestCase whose examples come from a standalone text file."""

    def id(self):
        # Underscore-joined variant of the dotted test name.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename
    __str__ = __repr__

    def format_failure(self, err):
        failed_test = self._dt_test
        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
                % (failed_test.name, failed_test.filename, err)
                )
+
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    """
    Build a DocFileCase for the doctest file at `path`.

    If `module_relative` is true (the default), `path` is an
    os-independent, module-relative path resolved against `package`
    (or the calling module's directory when no package is given); it
    is an error to specify `package` with `module_relative=False`.
    `globs` gives the initial globals for the test (default {});
    `parser` is the DocTestParser used to extract the examples; the
    remaining keyword `options` are forwarded to DocFileCase.
    """
    if globs is None:
        globs = {}

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path.
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)

    # Find the file and read it.  Close the handle explicitly instead
    # of leaking it until garbage collection (the previous
    # `open(path).read()` never closed it).
    name = os.path.basename(path)
    f = open(path)
    try:
        doc = f.read()
    finally:
        f.close()

    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+ """A unittest suite for one or more doctest files.
+
+ The path to each doctest file is given as a string; the
+ interpretation of that string depends on the keyword argument
+ "module_relative".
+
+ A number of options may be provided as keyword arguments:
+
+ module_relative
+ If "module_relative" is True, then the given file paths are
+ interpreted as os-independent module-relative paths. By
+ default, these paths are relative to the calling module's
+ directory; but if the "package" argument is specified, then
+ they are relative to that package. To ensure os-independence,
+ "filename" should use "/" characters to separate path
+ segments, and may not be an absolute path (i.e., it may not
+ begin with "/").
+
+ If "module_relative" is False, then the given file paths are
+ interpreted as os-specific paths. These paths may be absolute
+ or relative (to the current working directory).
+
+ package
+ A Python package or the name of a Python package whose directory
+ should be used as the base directory for module relative paths.
+ If "package" is not specified, then the calling module's
+ directory is used as the base directory for module relative
+ filenames. It is an error to specify "package" if
+ "module_relative" is False.
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+
+ parser
+ A DocTestParser (or subclass) that should be used to extract
+ tests from the files.
+ """
+ suite = unittest.TestSuite()
+
+ # We do this here so that _normalize_module is called at the right
+ # level. If it were called in DocFileTest, then this function
+ # would be the caller and we might guess the package incorrectly.
+ if kw.get('module_relative', True):
+ kw['package'] = _normalize_module(kw.get('package'))
+
+ for path in paths:
+ suite.addTest(DocFileTest(path, **kw))
+
+ return suite
+
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+ r"""Extract script from text with examples.
+
+ Converts text with examples to a Python script. Example input is
+ converted to regular code. Example output and all other words
+ are converted to comments:
+
+ >>> text = '''
+ ... Here are examples of simple math.
+ ...
+ ... Python has super accurate integer addition
+ ...
+ ... >>> 2 + 2
+ ... 5
+ ...
+ ... And very friendly error messages:
+ ...
+ ... >>> 1/0
+ ... To Infinity
+ ... And
+ ... Beyond
+ ...
+ ... You can use logic if you want:
+ ...
+ ... >>> if 0:
+ ... ... blah
+ ... ... blah
+ ... ...
+ ...
+ ... Ho hum
+ ... '''
+
+ >>> print script_from_examples(text)
+ # Here are examples of simple math.
+ #
+ # Python has super accurate integer addition
+ #
+ 2 + 2
+ # Expected:
+ ## 5
+ #
+ # And very friendly error messages:
+ #
+ 1/0
+ # Expected:
+ ## To Infinity
+ ## And
+ ## Beyond
+ #
+ # You can use logic if you want:
+ #
+ if 0:
+ blah
+ blah
+ #
+ # Ho hum
+ """
+ output = []
+ for piece in DocTestParser().parse(s):
+ if isinstance(piece, Example):
+ # Add the example's source code (strip trailing NL)
+ output.append(piece.source[:-1])
+ # Add the expected output:
+ want = piece.want
+ if want:
+ output.append('# Expected:')
+ output += ['## '+l for l in want.split('\n')[:-1]]
+ else:
+ # Add non-example text.
+ output += [_comment_line(l)
+ for l in piece.split('\n')[:-1]]
+
+ # Trim junk on both ends.
+ while output and output[-1] == '#':
+ output.pop()
+ while output and output[0] == '#':
+ output.pop(0)
+ # Combine the output, and return it.
+ return '\n'.join(output)
+
+def testsource(module, name):
+ """Extract the test sources from a doctest docstring as a script.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the doc string with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ tests = DocTestFinder().find(module)
+ test = [t for t in tests if t.name == name]
+ if not test:
+ raise ValueError(name, "not found in tests")
+ test = test[0]
+ testsrc = script_from_examples(test.docstring)
+ return testsrc
+
+def debug_src(src, pm=False, globs=None):
+    """Debug a single doctest docstring, in argument `src`."""
+ testsrc = script_from_examples(src)
+ debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+ "Debug a test script. `src` is the script, as a string."
+ import pdb
+
+    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
+ # docs say, a file so created cannot be opened by name a second time
+ # on modern Windows boxes, and execfile() needs to open it.
+ srcfilename = tempfile.mktemp(".py", "doctestdebug")
+ f = open(srcfilename, 'w')
+ f.write(src)
+ f.close()
+
+ try:
+ if globs:
+ globs = globs.copy()
+ else:
+ globs = {}
+
+ if pm:
+ try:
+ execfile(srcfilename, globs, globs)
+ except:
+ print sys.exc_info()[1]
+ pdb.post_mortem(sys.exc_info()[2])
+ else:
+ # Note that %r is vital here. '%s' instead can, e.g., cause
+ # backslashes to get treated as metacharacters on Windows.
+ pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+ finally:
+ os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+ """Debug a single doctest docstring.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the docstring with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ testsrc = testsource(module, name)
+ debug_script(testsrc, pm, module.__dict__)
+
+######################################################################
+## 10. Example Usage
+######################################################################
+class _TestClass:
+ """
+ A pointless class, for sanity-checking of docstring testing.
+
+ Methods:
+ square()
+ get()
+
+ >>> _TestClass(13).get() + _TestClass(-12).get()
+ 1
+ >>> hex(_TestClass(13).square().get())
+ '0xa9'
+ """
+
+ def __init__(self, val):
+ """val -> _TestClass object with associated value val.
+
+ >>> t = _TestClass(123)
+ >>> print t.get()
+ 123
+ """
+
+ self.val = val
+
+ def square(self):
+ """square() -> square TestClass's associated value
+
+ >>> _TestClass(13).square().get()
+ 169
+ """
+
+ self.val = self.val ** 2
+ return self
+
+ def get(self):
+ """get() -> return TestClass's associated value.
+
+ >>> x = _TestClass(-42)
+ >>> print x.get()
+ -42
+ """
+
+ return self.val
+
+__test__ = {"_TestClass": _TestClass,
+ "string": r"""
+ Example of a string object, searched as-is.
+ >>> x = 1; y = 2
+ >>> x + y, x * y
+ (3, 2)
+ """,
+
+ "bool-int equivalence": r"""
+ In 2.2, boolean expressions displayed
+ 0 or 1. By default, we still accept
+ them. This can be disabled by passing
+ DONT_ACCEPT_TRUE_FOR_1 to the new
+ optionflags argument.
+ >>> 4 == 4
+ 1
+ >>> 4 == 4
+ True
+ >>> 4 > 4
+ 0
+ >>> 4 > 4
+ False
+ """,
+
+ "blank lines": r"""
+ Blank lines can be marked with <BLANKLINE>:
+ >>> print 'foo\n\nbar\n'
+ foo
+ <BLANKLINE>
+ bar
+ <BLANKLINE>
+ """,
+
+ "ellipsis": r"""
+ If the ellipsis flag is used, then '...' can be used to
+ elide substrings in the desired output:
+ >>> print range(1000) #doctest: +ELLIPSIS
+ [0, 1, 2, ..., 999]
+ """,
+
+ "whitespace normalization": r"""
+ If the whitespace normalization flag is used, then
+ differences in whitespace are ignored.
+ >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29]
+ """,
+ }
+
+def _test():
+ r = unittest.TextTestRunner()
+ r.run(DocTestSuite())
+
+if __name__ == "__main__":
+ _test()
diff --git a/paste/util/filemixin.py b/paste/util/filemixin.py
new file mode 100644
index 0000000..10a9e7c
--- /dev/null
+++ b/paste/util/filemixin.py
@@ -0,0 +1,53 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+class FileMixin(object):
+
+ """
+ Used to provide auxiliary methods to objects simulating files.
+ Objects must implement write, and read if they are input files.
+ Also they should implement close.
+
+ Other methods you may wish to override:
+ * flush()
+ * seek(offset[, whence])
+ * tell()
+ * truncate([size])
+
+ Attributes you may wish to provide:
+ * closed
+ * encoding (you should also respect that in write())
+ * mode
+ * newlines (hard to support)
+ * softspace
+ """
+
+ def flush(self):
+ pass
+
+ def next(self):
+ return self.readline()
+
+ def readline(self, size=None):
+ # @@: This is a lame implementation; but a buffer would probably
+ # be necessary for a better implementation
+ output = []
+ while 1:
+ next = self.read(1)
+ if not next:
+ return ''.join(output)
+ output.append(next)
+ if size and size > 0 and len(output) >= size:
+ return ''.join(output)
+ if next == '\n':
+ # @@: also \r?
+ return ''.join(output)
+
+ def xreadlines(self):
+ return self
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+
diff --git a/paste/util/finddata.py b/paste/util/finddata.py
new file mode 100644
index 0000000..af29c04
--- /dev/null
+++ b/paste/util/finddata.py
@@ -0,0 +1,99 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+# Note: you may want to copy this into your setup.py file verbatim, as
+# you can't import this from another package, when you don't know if
+# that package is installed yet.
+
+import os
+import sys
+from fnmatch import fnmatchcase
+from distutils.util import convert_path
+
+# Provided as an attribute, so you can append to these instead
+# of replicating them:
+standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
+standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
+ './dist', 'EGG-INFO', '*.egg-info')
+
+def find_package_data(
+ where='.', package='',
+ exclude=standard_exclude,
+ exclude_directories=standard_exclude_directories,
+ only_in_packages=True,
+ show_ignored=False):
+ """
+ Return a dictionary suitable for use in ``package_data``
+ in a distutils ``setup.py`` file.
+
+ The dictionary looks like::
+
+ {'package': [files]}
+
+ Where ``files`` is a list of all the files in that package that
+ don't match anything in ``exclude``.
+
+ If ``only_in_packages`` is true, then top-level directories that
+ are not packages won't be included (but directories under packages
+ will).
+
+ Directories matching any pattern in ``exclude_directories`` will
+ be ignored; by default directories with leading ``.``, ``CVS``,
+ and ``_darcs`` will be ignored.
+
+ If ``show_ignored`` is true, then all the files that aren't
+ included in package data are shown on stderr (for debugging
+ purposes).
+
+ Note patterns use wildcards, or can be exact paths (including
+    leading ``./``); wildcard matches are case-sensitive, exact paths are not.
+ """
+
+ out = {}
+ stack = [(convert_path(where), '', package, only_in_packages)]
+ while stack:
+ where, prefix, package, only_in_packages = stack.pop(0)
+ for name in os.listdir(where):
+ fn = os.path.join(where, name)
+ if os.path.isdir(fn):
+ bad_name = False
+ for pattern in exclude_directories:
+ if (fnmatchcase(name, pattern)
+ or fn.lower() == pattern.lower()):
+ bad_name = True
+ if show_ignored:
+ print >> sys.stderr, (
+ "Directory %s ignored by pattern %s"
+ % (fn, pattern))
+ break
+ if bad_name:
+ continue
+ if (os.path.isfile(os.path.join(fn, '__init__.py'))
+ and not prefix):
+ if not package:
+ new_package = name
+ else:
+ new_package = package + '.' + name
+ stack.append((fn, '', new_package, False))
+ else:
+ stack.append((fn, prefix + name + '/', package, only_in_packages))
+ elif package or not only_in_packages:
+ # is a file
+ bad_name = False
+ for pattern in exclude:
+ if (fnmatchcase(name, pattern)
+ or fn.lower() == pattern.lower()):
+ bad_name = True
+ if show_ignored:
+ print >> sys.stderr, (
+ "File %s ignored by pattern %s"
+ % (fn, pattern))
+ break
+ if bad_name:
+ continue
+ out.setdefault(package, []).append(prefix+name)
+ return out
+
+if __name__ == '__main__':
+ import pprint
+ pprint.pprint(
+ find_package_data(show_ignored=True))
diff --git a/paste/util/findpackage.py b/paste/util/findpackage.py
new file mode 100644
index 0000000..68b5e8b
--- /dev/null
+++ b/paste/util/findpackage.py
@@ -0,0 +1,26 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+import sys
+import os
+
+def find_package(dir):
+ """
+ Given a directory, finds the equivalent package name. If it
+ is directly in sys.path, returns ''.
+ """
+ dir = os.path.abspath(dir)
+ orig_dir = dir
+ path = map(os.path.abspath, sys.path)
+ packages = []
+ last_dir = None
+ while 1:
+ if dir in path:
+ return '.'.join(packages)
+ packages.insert(0, os.path.basename(dir))
+ dir = os.path.dirname(dir)
+ if last_dir == dir:
+ raise ValueError(
+ "%s is not under any path found in sys.path" % orig_dir)
+ last_dir = dir
+
diff --git a/paste/util/import_string.py b/paste/util/import_string.py
new file mode 100644
index 0000000..3feb4dd
--- /dev/null
+++ b/paste/util/import_string.py
@@ -0,0 +1,95 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+'imports' a string -- converts a string to a Python object, importing
+any necessary modules and evaluating the expression. Everything
+before the : in an import expression is the module path; everything
+after is an expression to be evaluated in the namespace of that
+module.
+
+Alternately, if no : is present, then import the modules and get the
+attributes as necessary. Arbitrary expressions are not allowed in
+that case.
+"""
+
+def eval_import(s):
+ """
+ Import a module, or import an object from a module.
+
+ A module name like ``foo.bar:baz()`` can be used, where
+ ``foo.bar`` is the module, and ``baz()`` is an expression
+ evaluated in the context of that module. Note this is not safe on
+ arbitrary strings because of the eval.
+ """
+ if ':' not in s:
+ return simple_import(s)
+ module_name, expr = s.split(':', 1)
+ module = import_module(module_name)
+ obj = eval(expr, module.__dict__)
+ return obj
+
+def simple_import(s):
+ """
+ Import a module, or import an object from a module.
+
+ A name like ``foo.bar.baz`` can be a module ``foo.bar.baz`` or a
+ module ``foo.bar`` with an object ``baz`` in it, or a module
+ ``foo`` with an object ``bar`` with an attribute ``baz``.
+ """
+ parts = s.split('.')
+ module = import_module(parts[0])
+ name = parts[0]
+ parts = parts[1:]
+ last_import_error = None
+ while parts:
+ name += '.' + parts[0]
+ try:
+ module = import_module(name)
+ parts = parts[1:]
+ except ImportError, e:
+ last_import_error = e
+ break
+ obj = module
+ while parts:
+ try:
+ obj = getattr(module, parts[0])
+ except AttributeError:
+ raise ImportError(
+ "Cannot find %s in module %r (stopped importing modules with error %s)" % (parts[0], module, last_import_error))
+ parts = parts[1:]
+ return obj
+
+def import_module(s):
+ """
+ Import a module.
+ """
+ mod = __import__(s)
+ parts = s.split('.')
+ for part in parts[1:]:
+ mod = getattr(mod, part)
+ return mod
+
+def try_import_module(module_name):
+ """
+ Imports a module, but catches import errors. Only catches errors
+ when that module doesn't exist; if that module itself has an
+ import error it will still get raised. Returns None if the module
+ doesn't exist.
+ """
+ try:
+ return import_module(module_name)
+ except ImportError, e:
+ if not getattr(e, 'args', None):
+ raise
+ desc = e.args[0]
+ if not desc.startswith('No module named '):
+ raise
+ desc = desc[len('No module named '):]
+ # If you import foo.bar.baz, the bad import could be any
+ # of foo.bar.baz, bar.baz, or baz; we'll test them all:
+ parts = module_name.split('.')
+ for i in range(len(parts)):
+ if desc == '.'.join(parts[i:]):
+ return None
+ raise
diff --git a/paste/util/intset.py b/paste/util/intset.py
new file mode 100644
index 0000000..3873c75
--- /dev/null
+++ b/paste/util/intset.py
@@ -0,0 +1,511 @@
+# -*- coding: iso-8859-15 -*-
+"""Immutable integer set type.
+
+Integer set class.
+
+Copyright (C) 2006, Heiko Wundram.
+Released under the MIT license.
+"""
+
+# Version information
+# -------------------
+
+__author__ = "Heiko Wundram <me@modelnine.org>"
+__version__ = "0.2"
+__revision__ = "6"
+__date__ = "2006-01-20"
+
+
+# Utility classes
+# ---------------
+
+class _Infinity(object):
+ """Internal type used to represent infinity values."""
+
+ __slots__ = ["_neg"]
+
+ def __init__(self,neg):
+ self._neg = neg
+
+ def __lt__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return ( self._neg and
+ not ( isinstance(value,_Infinity) and value._neg ) )
+
+ def __le__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return self._neg
+
+ def __gt__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return not ( self._neg or
+ ( isinstance(value,_Infinity) and not value._neg ) )
+
+ def __ge__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return not self._neg
+
+ def __eq__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return isinstance(value,_Infinity) and self._neg == value._neg
+
+ def __ne__(self,value):
+ if not isinstance(value,(int,long,_Infinity)):
+ return NotImplemented
+ return not isinstance(value,_Infinity) or self._neg <> value._neg
+
+ def __repr__(self):
+ return "None"
+
+
+# Constants
+# ---------
+
+_MININF = _Infinity(True)
+_MAXINF = _Infinity(False)
+
+
+# Integer set class
+# -----------------
+
+class IntSet(object):
+ """Integer set class with efficient storage in a RLE format of ranges.
+ Supports minus and plus infinity in the range."""
+
+ __slots__ = ["_ranges","_min","_max","_hash"]
+
+ def __init__(self,*args,**kwargs):
+ """Initialize an integer set. The constructor accepts an unlimited
+ number of arguments that may either be tuples in the form of
+ (start,stop) where either start or stop may be a number or None to
+ represent maximum/minimum in that direction. The range specified by
+ (start,stop) is always inclusive (differing from the builtin range
+        function).
+
+ Keyword arguments that can be passed to an integer set are min and
+ max, which specify the minimum and maximum number in the set,
+ respectively. You can also pass None here to represent minus or plus
+ infinity, which is also the default.
+ """
+
+ # Special case copy constructor.
+ if len(args) == 1 and isinstance(args[0],IntSet):
+ if kwargs:
+ raise ValueError("No keyword arguments for copy constructor.")
+ self._min = args[0]._min
+ self._max = args[0]._max
+ self._ranges = args[0]._ranges
+ self._hash = args[0]._hash
+ return
+
+ # Initialize set.
+ self._ranges = []
+
+ # Process keyword arguments.
+ self._min = kwargs.pop("min",_MININF)
+ self._max = kwargs.pop("max",_MAXINF)
+ if self._min is None:
+ self._min = _MININF
+ if self._max is None:
+ self._max = _MAXINF
+
+ # Check keyword arguments.
+ if kwargs:
+ raise ValueError("Invalid keyword argument.")
+ if not ( isinstance(self._min,(int,long)) or self._min is _MININF ):
+ raise TypeError("Invalid type of min argument.")
+ if not ( isinstance(self._max,(int,long)) or self._max is _MAXINF ):
+ raise TypeError("Invalid type of max argument.")
+ if ( self._min is not _MININF and self._max is not _MAXINF and
+ self._min > self._max ):
+ raise ValueError("Minimum is not smaller than maximum.")
+ if isinstance(self._max,(int,long)):
+ self._max += 1
+
+ # Process arguments.
+ for arg in args:
+ if isinstance(arg,(int,long)):
+ start, stop = arg, arg+1
+ elif isinstance(arg,tuple):
+ if len(arg) <> 2:
+ raise ValueError("Invalid tuple, must be (start,stop).")
+
+ # Process argument.
+ start, stop = arg
+ if start is None:
+ start = self._min
+ if stop is None:
+ stop = self._max
+
+ # Check arguments.
+ if not ( isinstance(start,(int,long)) or start is _MININF ):
+ raise TypeError("Invalid type of tuple start.")
+ if not ( isinstance(stop,(int,long)) or stop is _MAXINF ):
+ raise TypeError("Invalid type of tuple stop.")
+ if ( start is not _MININF and stop is not _MAXINF and
+ start > stop ):
+ continue
+ if isinstance(stop,(int,long)):
+ stop += 1
+ else:
+ raise TypeError("Invalid argument.")
+
+ if start > self._max:
+ continue
+ elif start < self._min:
+ start = self._min
+ if stop < self._min:
+ continue
+ elif stop > self._max:
+ stop = self._max
+ self._ranges.append((start,stop))
+
+ # Normalize set.
+ self._normalize()
+
+ # Utility functions for set operations
+ # ------------------------------------
+
+ def _iterranges(self,r1,r2,minval=_MININF,maxval=_MAXINF):
+ curval = minval
+ curstates = {"r1":False,"r2":False}
+ imax, jmax = 2*len(r1), 2*len(r2)
+ i, j = 0, 0
+ while i < imax or j < jmax:
+ if i < imax and ( ( j < jmax and
+ r1[i>>1][i&1] < r2[j>>1][j&1] ) or
+ j == jmax ):
+ cur_r, newname, newstate = r1[i>>1][i&1], "r1", not (i&1)
+ i += 1
+ else:
+ cur_r, newname, newstate = r2[j>>1][j&1], "r2", not (j&1)
+ j += 1
+ if curval < cur_r:
+ if cur_r > maxval:
+ break
+ yield curstates, (curval,cur_r)
+ curval = cur_r
+ curstates[newname] = newstate
+ if curval < maxval:
+ yield curstates, (curval,maxval)
+
+ def _normalize(self):
+ self._ranges.sort()
+ i = 1
+ while i < len(self._ranges):
+ if self._ranges[i][0] < self._ranges[i-1][1]:
+ self._ranges[i-1] = (self._ranges[i-1][0],
+ max(self._ranges[i-1][1],
+ self._ranges[i][1]))
+ del self._ranges[i]
+ else:
+ i += 1
+ self._ranges = tuple(self._ranges)
+ self._hash = hash(self._ranges)
+
+ def __coerce__(self,other):
+ if isinstance(other,IntSet):
+ return self, other
+ elif isinstance(other,(int,long,tuple)):
+ try:
+ return self, self.__class__(other)
+ except TypeError:
+ # Catch a type error, in that case the structure specified by
+ # other is something we can't coerce, return NotImplemented.
+ # ValueErrors are not caught, they signal that the data was
+ # invalid for the constructor. This is appropriate to signal
+ # as a ValueError to the caller.
+ return NotImplemented
+ elif isinstance(other,list):
+ try:
+ return self, self.__class__(*other)
+ except TypeError:
+ # See above.
+ return NotImplemented
+ return NotImplemented
+
+ # Set function definitions
+ # ------------------------
+
+ def _make_function(name,type,doc,pall,pany=None):
+ """Makes a function to match two ranges. Accepts two types: either
+ 'set', which defines a function which returns a set with all ranges
+ matching pall (pany is ignored), or 'bool', which returns True if pall
+ matches for all ranges and pany matches for any one range. doc is the
+        docstring to give this function. pany may be None to ignore the any
+ match.
+
+ The predicates get a dict with two keys, 'r1', 'r2', which denote
+ whether the current range is present in range1 (self) and/or range2
+        (other) or in neither of the two, respectively."""
+
+ if type == "set":
+ def f(self,other):
+ coerced = self.__coerce__(other)
+ if coerced is NotImplemented:
+ return NotImplemented
+ other = coerced[1]
+ newset = self.__class__.__new__(self.__class__)
+ newset._min = min(self._min,other._min)
+ newset._max = max(self._max,other._max)
+ newset._ranges = []
+ for states, (start,stop) in \
+ self._iterranges(self._ranges,other._ranges,
+ newset._min,newset._max):
+ if pall(states):
+ if newset._ranges and newset._ranges[-1][1] == start:
+ newset._ranges[-1] = (newset._ranges[-1][0],stop)
+ else:
+ newset._ranges.append((start,stop))
+ newset._ranges = tuple(newset._ranges)
+ newset._hash = hash(self._ranges)
+ return newset
+ elif type == "bool":
+ def f(self,other):
+ coerced = self.__coerce__(other)
+ if coerced is NotImplemented:
+ return NotImplemented
+ other = coerced[1]
+ _min = min(self._min,other._min)
+ _max = max(self._max,other._max)
+ found = not pany
+ for states, (start,stop) in \
+ self._iterranges(self._ranges,other._ranges,_min,_max):
+ if not pall(states):
+ return False
+ found = found or pany(states)
+ return found
+ else:
+ raise ValueError("Invalid type of function to create.")
+ try:
+ f.func_name = name
+ except TypeError:
+ pass
+ f.func_doc = doc
+ return f
+
+ # Intersection.
+ __and__ = _make_function("__and__","set",
+ "Intersection of two sets as a new set.",
+ lambda s: s["r1"] and s["r2"])
+ __rand__ = _make_function("__rand__","set",
+ "Intersection of two sets as a new set.",
+ lambda s: s["r1"] and s["r2"])
+ intersection = _make_function("intersection","set",
+ "Intersection of two sets as a new set.",
+ lambda s: s["r1"] and s["r2"])
+
+ # Union.
+ __or__ = _make_function("__or__","set",
+ "Union of two sets as a new set.",
+ lambda s: s["r1"] or s["r2"])
+ __ror__ = _make_function("__ror__","set",
+ "Union of two sets as a new set.",
+ lambda s: s["r1"] or s["r2"])
+ union = _make_function("union","set",
+ "Union of two sets as a new set.",
+ lambda s: s["r1"] or s["r2"])
+
+ # Difference.
+ __sub__ = _make_function("__sub__","set",
+ "Difference of two sets as a new set.",
+ lambda s: s["r1"] and not s["r2"])
+ __rsub__ = _make_function("__rsub__","set",
+ "Difference of two sets as a new set.",
+ lambda s: s["r2"] and not s["r1"])
+ difference = _make_function("difference","set",
+ "Difference of two sets as a new set.",
+ lambda s: s["r1"] and not s["r2"])
+
+ # Symmetric difference.
+ __xor__ = _make_function("__xor__","set",
+ "Symmetric difference of two sets as a new set.",
+ lambda s: s["r1"] ^ s["r2"])
+ __rxor__ = _make_function("__rxor__","set",
+ "Symmetric difference of two sets as a new set.",
+ lambda s: s["r1"] ^ s["r2"])
+ symmetric_difference = _make_function("symmetric_difference","set",
+ "Symmetric difference of two sets as a new set.",
+ lambda s: s["r1"] ^ s["r2"])
+
+ # Containership testing.
+ __contains__ = _make_function("__contains__","bool",
+ "Returns true if self is superset of other.",
+ lambda s: s["r1"] or not s["r2"])
+ issubset = _make_function("issubset","bool",
+ "Returns true if self is subset of other.",
+ lambda s: s["r2"] or not s["r1"])
+ istruesubset = _make_function("istruesubset","bool",
+ "Returns true if self is true subset of other.",
+ lambda s: s["r2"] or not s["r1"],
+ lambda s: s["r2"] and not s["r1"])
+ issuperset = _make_function("issuperset","bool",
+ "Returns true if self is superset of other.",
+ lambda s: s["r1"] or not s["r2"])
+ istruesuperset = _make_function("istruesuperset","bool",
+ "Returns true if self is true superset of other.",
+ lambda s: s["r1"] or not s["r2"],
+ lambda s: s["r1"] and not s["r2"])
+ overlaps = _make_function("overlaps","bool",
+ "Returns true if self overlaps with other.",
+ lambda s: True,
+ lambda s: s["r1"] and s["r2"])
+
+ # Comparison.
+ __eq__ = _make_function("__eq__","bool",
+ "Returns true if self is equal to other.",
+ lambda s: not ( s["r1"] ^ s["r2"] ))
+ __ne__ = _make_function("__ne__","bool",
+ "Returns true if self is different to other.",
+ lambda s: True,
+ lambda s: s["r1"] ^ s["r2"])
+
+ # Clean up namespace.
+ del _make_function
+
+ # Define other functions.
+ def inverse(self):
+ """Inverse of set as a new set."""
+
+ newset = self.__class__.__new__(self.__class__)
+ newset._min = self._min
+ newset._max = self._max
+ newset._ranges = []
+ laststop = self._min
+ for r in self._ranges:
+ if laststop < r[0]:
+ newset._ranges.append((laststop,r[0]))
+ laststop = r[1]
+ if laststop < self._max:
+ newset._ranges.append((laststop,self._max))
+ return newset
+
+ __invert__ = inverse
+
+ # Hashing
+ # -------
+
+ def __hash__(self):
+ """Returns a hash value representing this integer set. As the set is
+ always stored normalized, the hash value is guaranteed to match for
+ matching ranges."""
+
+ return self._hash
+
+ # Iterating
+ # ---------
+
+ def __len__(self):
+ """Get length of this integer set. In case the length is larger than
+ 2**31 (including infinitely sized integer sets), it raises an
+ OverflowError. This is due to len() restricting the size to
+ 0 <= len < 2**31."""
+
+ if not self._ranges:
+ return 0
+ if self._ranges[0][0] is _MININF or self._ranges[-1][1] is _MAXINF:
+ raise OverflowError("Infinitely sized integer set.")
+ rlen = 0
+ for r in self._ranges:
+ rlen += r[1]-r[0]
+ if rlen >= 2**31:
+ raise OverflowError("Integer set bigger than 2**31.")
+ return rlen
+
+ def len(self):
+ """Returns the length of this integer set as an integer. In case the
+ length is infinite, returns -1. This function exists because of a
+ limitation of the builtin len() function which expects values in
+ the range 0 <= len < 2**31. Use this function in case your integer
+ set might be larger."""
+
+ if not self._ranges:
+ return 0
+ if self._ranges[0][0] is _MININF or self._ranges[-1][1] is _MAXINF:
+ return -1
+ rlen = 0
+ for r in self._ranges:
+ rlen += r[1]-r[0]
+ return rlen
+
+ def __nonzero__(self):
+ """Returns true if this integer set contains at least one item."""
+
+ return bool(self._ranges)
+
+ def __iter__(self):
+ """Iterate over all values in this integer set. Iteration always starts
+ by iterating from lowest to highest over the ranges that are bounded.
+ After processing these, all ranges that are unbounded (maximum 2) are
+ yielded intermixed."""
+
+ ubranges = []
+ for r in self._ranges:
+ if r[0] is _MININF:
+ if r[1] is _MAXINF:
+ ubranges.extend(([0,1],[-1,-1]))
+ else:
+ ubranges.append([r[1]-1,-1])
+ elif r[1] is _MAXINF:
+ ubranges.append([r[0],1])
+ else:
+ for val in xrange(r[0],r[1]):
+ yield val
+ if ubranges:
+ while True:
+ for ubrange in ubranges:
+ yield ubrange[0]
+ ubrange[0] += ubrange[1]
+
+ # Printing
+ # --------
+
+ def __repr__(self):
+ """Return a representation of this integer set. The representation is
+ executable to get an equal integer set."""
+
+ rv = []
+ for start, stop in self._ranges:
+ if ( isinstance(start,(int,long)) and isinstance(stop,(int,long))
+ and stop-start == 1 ):
+ rv.append("%r" % start)
+ elif isinstance(stop,(int,long)):
+ rv.append("(%r,%r)" % (start,stop-1))
+ else:
+ rv.append("(%r,%r)" % (start,stop))
+ if self._min is not _MININF:
+ rv.append("min=%r" % self._min)
+ if self._max is not _MAXINF:
+ rv.append("max=%r" % self._max)
+ return "%s(%s)" % (self.__class__.__name__,",".join(rv))
+
+if __name__ == "__main__":
+ # Little test script demonstrating functionality.
+ x = IntSet((10,20),30)
+ y = IntSet((10,20))
+ z = IntSet((10,20),30,(15,19),min=0,max=40)
+ print x
+ print x&110
+ print x|110
+ print x^(15,25)
+ print x-12
+ print 12 in x
+ print x.issubset(x)
+ print y.issubset(x)
+ print x.istruesubset(x)
+ print y.istruesubset(x)
+ for val in x:
+ print val
+ print x.inverse()
+ print x == z
+ print x == y
+ print x <> y
+ print hash(x)
+ print hash(z)
+ print len(x)
+ print x.len()
diff --git a/paste/util/ip4.py b/paste/util/ip4.py
new file mode 100644
index 0000000..b0dfde8
--- /dev/null
+++ b/paste/util/ip4.py
@@ -0,0 +1,273 @@
+# -*- coding: iso-8859-15 -*-
+"""IP4 address range set implementation.
+
+Implements an IPv4-range type.
+
+Copyright (C) 2006, Heiko Wundram.
+Released under the MIT-license.
+"""
+
+# Version information
+# -------------------
+
+__author__ = "Heiko Wundram <me@modelnine.org>"
+__version__ = "0.2"
+__revision__ = "3"
+__date__ = "2006-01-20"
+
+
+# Imports
+# -------
+
+import intset
+import socket
+
+
+# IP4Range class
+# --------------
+
class IP4Range(intset.IntSet):
    """IP4 address range class with efficient storage of address ranges.
    Supports all set operations."""

    _MINIP4 = 0
    _MAXIP4 = (1<<32) - 1
    # Identity translation table; together with _IPREMOVE it lets
    # _parseAddr delete all digits/dots — anything left over must be a
    # hostname that needs DNS resolution (Python 2 str.translate form).
    _UNITYTRANS = "".join([chr(n) for n in range(256)])
    _IPREMOVE = "0123456789."

    def __init__(self,*args):
        """Initialize an ip4range class. The constructor accepts an unlimited
        number of arguments that may either be tuples in the form (start,stop),
        integers, longs or strings, where start and stop in a tuple may
        also be of the form integer, long or string.

        Passing an integer or long means passing an IPv4-address that's already
        been converted to integer notation, whereas passing a string specifies
        an address where this conversion still has to be done. A string
        address may be in the following formats:

        - 1.2.3.4 - a plain address, interpreted as a single address
        - 1.2.3 - a set of addresses, interpreted as 1.2.3.0-1.2.3.255
        - localhost - hostname to look up, interpreted as single address
        - 1.2.3<->5 - a set of addresses, interpreted as 1.2.3.0-1.2.5.255
        - 1.2.0.0/16 - a set of addresses, interpreted as 1.2.0.0-1.2.255.255

        Only the first three notations are valid if you use a string address in
        a tuple, whereby notation 2 is interpreted as 1.2.3.0 if specified as
        lower bound and 1.2.3.255 if specified as upper bound, not as a range
        of addresses.

        Specifying a range is done with the <-> operator. This is necessary
        because '-' might be present in a hostname. '<->' shouldn't be, ever.

        Raises ValueError for malformed addresses/masks and TypeError for
        arguments of unsupported type.
        """

        # Special case copy constructor.
        if len(args) == 1 and isinstance(args[0],IP4Range):
            super(IP4Range,self).__init__(args[0])
            return

        # Normalize every argument to an (int, int) tuple understood by
        # the IntSet base class.
        args = list(args)
        for i in range(len(args)):
            argval = args[i]
            if isinstance(argval,str):
                if "<->" in argval:
                    # Type 4 address (explicit range).
                    args[i] = self._parseRange(*argval.split("<->",1))
                    continue
                elif "/" in argval:
                    # Type 5 address (network/mask).
                    args[i] = self._parseMask(*argval.split("/",1))
                else:
                    # Type 1, 2 or 3 (plain address, prefix or hostname).
                    args[i] = self._parseAddrRange(argval)
            elif isinstance(argval,tuple):
                # Bug fix: previously checked len(tuple) — the builtin type,
                # which is a TypeError — instead of the argument itself.
                if len(argval) != 2:
                    raise ValueError("Tuple is of invalid length.")
                addr1, addr2 = argval
                if isinstance(addr1,str):
                    # Lower bound of a string prefix is its first address.
                    addr1 = self._parseAddrRange(addr1)[0]
                elif not isinstance(addr1,(int,long)):
                    raise TypeError("Invalid argument.")
                if isinstance(addr2,str):
                    # Upper bound of a string prefix is its last address.
                    addr2 = self._parseAddrRange(addr2)[1]
                elif not isinstance(addr2,(int,long)):
                    raise TypeError("Invalid argument.")
                args[i] = (addr1,addr2)
            elif not isinstance(argval,(int,long)):
                raise TypeError("Invalid argument.")

        # Initialize the integer set, clamped to the IPv4 address space.
        super(IP4Range,self).__init__(min=self._MINIP4,max=self._MAXIP4,*args)

    # Parsing functions
    # -----------------

    def _parseRange(self,addr1,addr2):
        """Parse 'a<->b' notation into an inclusive (start,stop) int pair."""
        naddr1, naddr1len = _parseAddr(addr1)
        naddr2, naddr2len = _parseAddr(addr2)
        if naddr2len < naddr1len:
            # The right side is a partial address: inherit the missing
            # leading components from the left side.
            naddr2 += naddr1&(((1<<((naddr1len-naddr2len)*8))-1)<<
                              (naddr2len*8))
            naddr2len = naddr1len
        elif naddr2len > naddr1len:
            raise ValueError("Range has more dots than address.")
        # Pad both to full 4-byte addresses; the upper bound fills its
        # unspecified low bytes with 0xff.
        naddr1 <<= (4-naddr1len)*8
        naddr2 <<= (4-naddr2len)*8
        naddr2 += (1<<((4-naddr2len)*8))-1
        return (naddr1,naddr2)

    def _parseMask(self,addr,mask):
        """Parse 'addr/mask' notation; mask may be a prefix length or a
        dotted netmask. Returns an inclusive (start,stop) int pair."""
        naddr, naddrlen = _parseAddr(addr)
        naddr <<= (4-naddrlen)*8
        try:
            if not mask:
                masklen = 0
            else:
                masklen = int(mask)
            if not 0 <= masklen <= 32:
                raise ValueError
        except ValueError:
            # Not a plain prefix length; try dotted-quad netmask form.
            try:
                # Bug fix: _parseAddr returns (value, length) — only the
                # value is a bit mask; previously the whole tuple was kept
                # and the bit operations below failed.
                mask = _parseAddr(mask,False)[0]
            except ValueError:
                raise ValueError("Mask isn't parseable.")
            remaining = 0
            masklen = 0
            if not mask:
                # All-zero mask selects the entire address space (/0).
                remaining = 32
            else:
                # Count trailing zero bits, then the run of one bits.
                # Bug fix: the first loop previously never shifted mask,
                # looping forever for any mask with trailing zero bits.
                while not (mask&1):
                    mask >>= 1
                    remaining += 1
                while (mask&1):
                    mask >>= 1
                    masklen += 1
            # A proper netmask is a contiguous run of ones followed only
            # by zeros; anything else leaves bits unaccounted for.
            if remaining+masklen != 32:
                raise ValueError("Mask isn't a proper host mask.")
        naddr1 = naddr & (((1<<masklen)-1)<<(32-masklen))
        naddr2 = naddr1 + (1<<(32-masklen)) - 1
        return (naddr1,naddr2)

    def _parseAddrRange(self,addr):
        """Parse a plain (possibly partial) address into the inclusive
        (start,stop) int pair covering all addresses with that prefix."""
        naddr, naddrlen = _parseAddr(addr)
        naddr1 = naddr<<((4-naddrlen)*8)
        naddr2 = ( (naddr<<((4-naddrlen)*8)) +
                   (1<<((4-naddrlen)*8)) - 1 )
        return (naddr1,naddr2)

    # Utility functions
    # -----------------

    def _int2ip(self,num):
        """Convert a 32-bit integer to dotted-quad string notation."""
        rv = []
        for i in range(4):
            rv.append(str(num&255))
            num >>= 8
        return ".".join(reversed(rv))

    # Iterating
    # ---------

    def iteraddresses(self):
        """Returns an iterator which iterates over ips in this iprange. An
        IP is returned in string form (e.g. '1.2.3.4')."""

        for v in super(IP4Range,self).__iter__():
            yield self._int2ip(v)

    def iterranges(self):
        """Returns an iterator which iterates over ip-ip ranges which build
        this iprange if combined. An ip-ip pair is returned in string form
        (e.g. '1.2.3.4-2.3.4.5')."""

        for r in self._ranges:
            if r[1]-r[0] == 1:
                # Single-address range: emit just the address.
                yield self._int2ip(r[0])
            else:
                # Internal ranges are half-open; display inclusive bounds.
                yield '%s-%s' % (self._int2ip(r[0]),self._int2ip(r[1]-1))

    def itermasks(self):
        """Returns an iterator which iterates over ip/mask pairs which build
        this iprange if combined. An IP/Mask pair is returned in string form
        (e.g. '1.2.3.0/24')."""

        for r in self._ranges:
            for v in self._itermasks(r):
                yield v

    def _itermasks(self,r):
        # Greedily carve the half-open range r into maximal aligned
        # power-of-two blocks, emitting each as network/prefix.
        ranges = [r]
        while ranges:
            cur = ranges.pop()
            curmask = 0
            while True:
                curmasklen = 1<<(32-curmask)
                # Smallest aligned block start at or above cur[0].
                start = (cur[0]+curmasklen-1)&(((1<<curmask)-1)<<(32-curmask))
                if start >= cur[0] and start+curmasklen <= cur[1]:
                    break
                else:
                    curmask += 1
            yield "%s/%s" % (self._int2ip(start),curmask)
            # Recurse (iteratively) into the leftovers on either side.
            if cur[0] < start:
                ranges.append((cur[0],start))
            if cur[1] > start+curmasklen:
                ranges.append((start+curmasklen,cur[1]))

    __iter__ = iteraddresses

    # Printing
    # --------

    def __repr__(self):
        """Returns a string which can be used to reconstruct this iprange."""

        rv = []
        for start, stop in self._ranges:
            if stop-start == 1:
                rv.append("%r" % (self._int2ip(start),))
            else:
                rv.append("(%r,%r)" % (self._int2ip(start),
                                       self._int2ip(stop-1)))
        return "%s(%s)" % (self.__class__.__name__,",".join(rv))
+
def _parseAddr(addr,lookup=True):
    """Parse a (possibly partial) dotted address into ``(value, nparts)``
    where ``value`` is the big-endian integer of the given parts and
    ``nparts`` is how many dot-separated components were present (1-4).

    If *lookup* is true and *addr* contains any character besides digits
    and dots, it is treated as a hostname and resolved via DNS first.
    Uses the Python 2 two-argument ``str.translate(table, deletechars)``
    form to strip digits/dots for that test.

    Raises ValueError on unresolvable hostnames, more than four parts,
    or a part outside 0-255.
    """
    if lookup and addr.translate(IP4Range._UNITYTRANS, IP4Range._IPREMOVE):
        try:
            addr = socket.gethostbyname(addr)
        except socket.error:
            raise ValueError("Invalid Hostname as argument.")
    naddr = 0
    for naddrpos, part in enumerate(addr.split(".")):
        if naddrpos >= 4:
            raise ValueError("Address contains more than four parts.")
        try:
            if not part:
                # Empty component (e.g. consecutive dots) counts as 0.
                part = 0
            else:
                part = int(part)
            if not 0 <= part < 256:
                raise ValueError
        except ValueError:
            raise ValueError("Address part out of range.")
        naddr <<= 8
        naddr += part
    return naddr, naddrpos+1
+
def ip2int(addr, lookup=True):
    """Convert a dotted-quad address (or, when *lookup* is true, a
    hostname) to its 32-bit integer value."""
    value, _length = _parseAddr(addr, lookup=lookup)
    return value
+
if __name__ == "__main__":
    # Little test script.
    # NOTE: Python 2 only — uses print statements.
    x = IP4Range("172.22.162.250/24")
    y = IP4Range("172.22.162.250","172.22.163.250","172.22.163.253<->255")
    print x
    for val in x.itermasks():
        print val
    for val in y.itermasks():
        print val
    for val in (x|y).itermasks():
        print val
    for val in (x^y).iterranges():
        print val
    for val in x:
        print val
diff --git a/paste/util/killthread.py b/paste/util/killthread.py
new file mode 100644
index 0000000..f1fc93f
--- /dev/null
+++ b/paste/util/killthread.py
@@ -0,0 +1,30 @@
+"""
+Kill a thread, from http://sebulba.wikispaces.com/recipe+thread2
+"""
+import types
+try:
+ import ctypes
+except ImportError:
+ raise ImportError(
+ "You cannot use paste.util.killthread without ctypes installed")
+if not hasattr(ctypes, 'pythonapi'):
+ raise ImportError(
+ "You cannot use paste.util.killthread without ctypes.pythonapi")
+
def async_raise(tid, exctype):
    """raises the exception, performs cleanup if needed.

    tid is the value given by thread.get_ident() (an integer).
    Raise SystemExit to kill a thread."""
    # Only exception *classes* may be raised asynchronously; the C API
    # rejects instances.  types.ClassType also accepts Python 2
    # old-style classes.
    if not isinstance(exctype, (types.ClassType, type)):
        raise TypeError("Only types can be raised (not instances)")
    if not isinstance(tid, int):
        raise TypeError("tid must be an integer")
    # Schedule `exctype` to be raised in the thread identified by tid the
    # next time that thread executes bytecode.
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
    if res == 0:
        # No thread with that id exists (or it already finished).
        raise ValueError("invalid thread id")
    elif res != 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        # (0 is converted to a NULL pointer by ctypes here.)
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")
diff --git a/paste/util/looper.py b/paste/util/looper.py
new file mode 100644
index 0000000..03a6b42
--- /dev/null
+++ b/paste/util/looper.py
@@ -0,0 +1,152 @@
+"""
+Helper for looping over sequences, particular in templates.
+
+Often in a loop in a template it's handy to know what's next up,
+previously up, if this is the first or last item in the sequence, etc.
+These can be awkward to manage in a normal Python loop, but using the
+looper you can get a better sense of the context. Use like::
+
+ >>> for loop, item in looper(['a', 'b', 'c']):
+ ... print loop.number, item
+ ... if not loop.last:
+ ... print '---'
+ 1 a
+ ---
+ 2 b
+ ---
+ 3 c
+
+"""
+
+__all__ = ['looper']
+
class looper(object):
    """
    Loop-context helper, mainly for templates.

    Wrap a sequence; iterating over the wrapper yields ``(loop, item)``
    pairs, where ``loop`` exposes positional information::

        for loop, item in looper(seq):
            if loop.first:
                ...
    """

    def __init__(self, seq):
        self.seq = seq

    def __iter__(self):
        # Each iteration gets its own stateful iterator.
        return looper_iter(self.seq)

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<%s for %r>' % (cls_name, self.seq)
+
class looper_iter(object):
    """Iterator yielding ``(loop_pos, item)`` pairs for a sequence."""

    def __init__(self, seq):
        # Snapshot the sequence so positions stay stable even if the
        # caller's object is mutated mid-iteration.
        self.seq = list(seq)
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        current = self.pos
        if current >= len(self.seq):
            raise StopIteration
        self.pos = current + 1
        return loop_pos(self.seq, current), self.seq[current]
+
class loop_pos(object):
    """
    Positional context for one item of a sequence being looped over.

    Exposes read-only properties (``index``, ``number``, ``item``,
    ``next``, ``previous``, ``odd``, ``even``, ``first``, ``last``,
    ``length``) plus the group-boundary helpers ``first_group`` and
    ``last_group``.
    """

    def __init__(self, seq, pos):
        self.seq = seq
        self.pos = pos

    def __repr__(self):
        # Bug fix: previously referenced the bare name ``pos`` (a
        # NameError at runtime); use the instance attributes instead.
        return '<loop pos=%r at %r>' % (
            self.seq[self.pos], self.pos)

    def index(self):
        """Zero-based position of the current item."""
        return self.pos
    index = property(index)

    def number(self):
        """One-based position of the current item."""
        return self.pos + 1
    number = property(number)

    def item(self):
        """The current item itself."""
        return self.seq[self.pos]
    item = property(item)

    def next(self):
        """The following item, or None when at the end."""
        try:
            return self.seq[self.pos+1]
        except IndexError:
            return None
    next = property(next)

    def previous(self):
        """The preceding item, or None when at the start."""
        if self.pos == 0:
            return None
        return self.seq[self.pos-1]
    previous = property(previous)

    def odd(self):
        """True for the 1st, 3rd, ... item (odd one-based number)."""
        return not self.pos % 2
    odd = property(odd)

    def even(self):
        """Truthy for the 2nd, 4th, ... item (even one-based number)."""
        return self.pos % 2
    even = property(even)

    def first(self):
        """True when this is the first item."""
        return self.pos == 0
    first = property(first)

    def last(self):
        """True when this is the last item."""
        return self.pos == len(self.seq)-1
    last = property(last)

    def length(self):
        """Total number of items in the sequence."""
        return len(self.seq)
    length = property(length)

    def first_group(self, getter=None):
        """
        Returns true if this item is the start of a new group,
        where groups mean that some attribute has changed. The getter
        can be None (the item itself changes), an attribute name like
        ``'.attr'``, a function, or a dict key or list index.
        """
        if self.first:
            return True
        return self._compare_group(self.item, self.previous, getter)

    def last_group(self, getter=None):
        """
        Returns true if this item is the end of a new group,
        where groups mean that some attribute has changed. The getter
        can be None (the item itself changes), an attribute name like
        ``'.attr'``, a function, or a dict key or list index.
        """
        if self.last:
            return True
        return self._compare_group(self.item, self.next, getter)

    def _compare_group(self, item, other, getter):
        # getter=None: compare the items directly; '.attr'/'.meth()':
        # compare attribute or method-call results; a callable: compare
        # getter(item); anything else: treat getter as an index/key.
        if getter is None:
            return item != other
        elif (isinstance(getter, basestring)
              and getter.startswith('.')):
            getter = getter[1:]
            if getter.endswith('()'):
                getter = getter[:-2]
                return getattr(item, getter)() != getattr(other, getter)()
            else:
                return getattr(item, getter) != getattr(other, getter)
        elif callable(getter):
            return getter(item) != getter(other)
        else:
            return item[getter] != other[getter]
+
diff --git a/paste/util/mimeparse.py b/paste/util/mimeparse.py
new file mode 100644
index 0000000..fe699f7
--- /dev/null
+++ b/paste/util/mimeparse.py
@@ -0,0 +1,160 @@
+"""MIME-Type Parser
+
+This module provides basic functions for handling mime-types. It can handle
+matching mime-types against a list of media-ranges. See section 14.1 of
+the HTTP specification [RFC 2616] for a complete explanation.
+
+ http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
+
+Based on mimeparse 0.1.2 by Joe Gregorio:
+
+ http://code.google.com/p/mimeparse/
+
+Contents:
+ - parse_mime_type(): Parses a mime-type into its component parts.
+ - parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
+ - quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
+ - quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
+ - best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
+ - desired_matches(): Filter against a list of desired mime-types in the order the server prefers.
+
+"""
+
+
def parse_mime_type(mime_type):
    """Carve up a mime-type into ``(type, subtype, params)``, where
    ``params`` is a dict of all parameters of the media range.

    For example, 'application/xhtml;q=0.5' parses to::

        ('application', 'xhtml', {'q': '0.5'})

    Missing or empty type/subtype components default to '*'.
    """
    pieces = mime_type.split(';')
    full_type, plist = pieces[0], pieces[1:]
    try:
        main_type, subtype = full_type.split('/', 1)
    except ValueError:
        # No '/': a bare type; the subtype is a wildcard.
        main_type, subtype = full_type.strip() or '*', '*'
    else:
        main_type = main_type.strip() or '*'
        subtype = subtype.strip() or '*'
    params = {}
    for raw_param in plist:
        parts = raw_param.split('=', 1)
        if len(parts) == 2:
            key = parts[0].strip()
            value = parts[1].strip()
            # Discard empty keys/values (e.g. ';=x' or ';q=').
            if key and value:
                params[key] = value
    return main_type, subtype, params
+
def parse_media_range(range):
    """Carve up a media range into ``(type, subtype, params)``, exactly
    like ``parse_mime_type``, but additionally guarantee that ``params``
    carries a 'q' quality value: a missing, malformed or out-of-range
    'q' is replaced by the default '1'.
    """
    type, subtype, params = parse_mime_type(range)
    try:
        quality = float(params['q'])
    except (KeyError, ValueError):
        params['q'] = '1'
    else:
        if not 0 <= quality <= 1:
            params['q'] = '1'
    return type, subtype, params
+
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for *mime_type* against a list of media ranges
    already parsed by ``parse_media_range``.

    Returns ``(fitness, q)`` of the best matching range, or ``(-1, 0)``
    when nothing matches.  Fitness rewards exact type (+100), exact
    subtype (+10) and each matching non-'q' parameter (+1).
    """
    best_fitness = -1
    best_fit_q = 0
    target_type, target_subtype, target_params = parse_media_range(mime_type)
    for rtype, rsubtype, rparams in parsed_ranges:
        type_ok = (rtype == target_type or rtype == '*'
                   or target_type == '*')
        subtype_ok = (rsubtype == target_subtype or rsubtype == '*'
                      or target_subtype == '*')
        if not (type_ok and subtype_ok):
            continue
        fitness = 0
        if rtype == target_type:
            fitness += 100
        if rsubtype == target_subtype:
            fitness += 10
        fitness += sum(1 for key in target_params
                       if key != 'q' and key in rparams
                       and rparams[key] == target_params[key])
        if fitness > best_fitness:
            best_fitness = fitness
            best_fit_q = rparams['q']
    return best_fitness, float(best_fit_q)
+
def quality_parsed(mime_type, parsed_ranges):
    """Return the 'q' quality of the best match for *mime_type* among
    *parsed_ranges* (already parsed by ``parse_media_range``), or 0 when
    nothing matches.  Same as ``quality()`` but takes pre-parsed ranges.
    """
    _fitness, q = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
+
def quality(mime_type, ranges):
    """Return the quality 'q' of *mime_type* when compared against the
    comma-separated media-ranges in *ranges*. For example:

    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7

    """
    parsed = [parse_media_range(r) for r in ranges.split(',')]
    return quality_parsed(mime_type, parsed)
+
def best_match(supported, header):
    """Choose from *supported* (a list of mime-types) the best match for
    the Accept-style *header* string. Ties on (fitness, q) are broken in
    favor of whichever mime-type comes first in *supported*.

    >>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    if not supported:
        return ''
    parsed_header = [parse_media_range(r) for r in header.split(',')]
    # Pair each candidate's (fitness, q) with -index so that max() breaks
    # ties toward the earliest entry in `supported`.
    weighted = []
    for n, mime_type in enumerate(supported):
        score = fitness_and_quality_parsed(mime_type, parsed_header)
        weighted.append((score, -n))
    best_type = max(weighted)
    if best_type[0][1]:
        return supported[-best_type[1]]
    return ''
+
def desired_matches(desired, header):
    """Filter *desired* (mime-types in the server's preferred order) down
    to those accepted by the client's Accept-style *header*, preserving
    the server's ordering regardless of browser preference.

    >>> desired_matches(['text/html', 'application/xml'], \\
    ...     'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png')
    ['text/html', 'application/xml']
    >>> desired_matches(['text/html', 'application/xml'], 'application/xml,application/json')
    ['application/xml']
    """
    parsed_ranges = [parse_media_range(r) for r in header.split(',')]
    accepted = []
    for mimetype in desired:
        if quality_parsed(mimetype, parsed_ranges):
            accepted.append(mimetype)
    return accepted
+
diff --git a/paste/util/multidict.py b/paste/util/multidict.py
new file mode 100644
index 0000000..d3eb1e9
--- /dev/null
+++ b/paste/util/multidict.py
@@ -0,0 +1,397 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+import cgi
+import copy
+import sys
+from UserDict import DictMixin
+
class MultiDict(DictMixin):

    """
    An ordered dictionary that can have multiple values for each key.
    Adds the methods getall, getone, mixed, and add to the normal
    dictionary interface.

    Internally backed by a list of (key, value) pairs, so most lookups
    are O(n); insertion order is preserved.
    """

    def __init__(self, *args, **kw):
        """Accept at most one positional argument — a mapping or an
        iterable of (key, value) pairs — plus keyword items."""
        if len(args) > 1:
            raise TypeError(
                "MultiDict can only be called with one positional argument")
        if args:
            if hasattr(args[0], 'iteritems'):
                items = list(args[0].iteritems())
            elif hasattr(args[0], 'items'):
                items = args[0].items()
            else:
                items = list(args[0])
            self._items = items
        else:
            self._items = []
        self._items.extend(kw.iteritems())

    def __getitem__(self, key):
        # Returns the FIRST value stored for a duplicated key.
        for k, v in self._items:
            if k == key:
                return v
        raise KeyError(repr(key))

    def __setitem__(self, key, value):
        # Plain assignment replaces ALL existing values for the key.
        try:
            del self[key]
        except KeyError:
            pass
        self._items.append((key, value))

    def add(self, key, value):
        """
        Add the key and value, not overwriting any previous value.
        """
        self._items.append((key, value))

    def getall(self, key):
        """
        Return a list of all values matching the key (may be an empty list)
        """
        result = []
        for k, v in self._items:
            if key == k:
                result.append(v)
        return result

    def getone(self, key):
        """
        Get one value matching the key, raising a KeyError if multiple
        values were found.
        """
        v = self.getall(key)
        if not v:
            raise KeyError('Key not found: %r' % key)
        if len(v) > 1:
            raise KeyError('Multiple values match %r: %r' % (key, v))
        return v[0]

    def mixed(self):
        """
        Returns a dictionary where the values are either single
        values, or a list of values when a key/value appears more than
        once in this dictionary. This is similar to the kind of
        dictionary often used to represent the variables in a web
        request.
        """
        result = {}
        multi = {}
        for key, value in self._items:
            if key in result:
                # We do this to not clobber any lists that are
                # *actual* values in this dictionary:
                if key in multi:
                    result[key].append(value)
                else:
                    result[key] = [result[key], value]
                    multi[key] = None
            else:
                result[key] = value
        return result

    def dict_of_lists(self):
        """
        Returns a dictionary where each key is associated with a
        list of values.
        """
        result = {}
        for key, value in self._items:
            if key in result:
                result[key].append(value)
            else:
                result[key] = [value]
        return result

    def __delitem__(self, key):
        # Remove EVERY pair with this key; iterate backwards so deleting
        # entries doesn't shift the indices still to be visited.
        items = self._items
        found = False
        for i in range(len(items)-1, -1, -1):
            if items[i][0] == key:
                del items[i]
                found = True
        if not found:
            raise KeyError(repr(key))

    def __contains__(self, key):
        for k, v in self._items:
            if k == key:
                return True
        return False

    has_key = __contains__

    def clear(self):
        self._items = []

    def copy(self):
        # Shallow copy: pairs are shared, the backing list is not.
        return MultiDict(self)

    def setdefault(self, key, default=None):
        for k, v in self._items:
            if key == k:
                return v
        self._items.append((key, default))
        return default

    def pop(self, key, *args):
        """Remove and return the first value for *key*; an optional
        second argument is returned instead of raising KeyError."""
        if len(args) > 1:
            # Bug/portability fix: was the Python-2-only statement form
            # ``raise TypeError, msg``; the call form behaves identically.
            raise TypeError("pop expected at most 2 arguments, got "
                            + repr(1 + len(args)))
        for i in range(len(self._items)):
            if self._items[i][0] == key:
                v = self._items[i][1]
                del self._items[i]
                return v
        if args:
            return args[0]
        else:
            raise KeyError(repr(key))

    def popitem(self):
        # LIFO: removes and returns the most recently added pair.
        return self._items.pop()

    def update(self, other=None, **kwargs):
        # Note: unlike dict.update, this APPENDS pairs rather than
        # replacing existing keys.
        if other is None:
            pass
        elif hasattr(other, 'items'):
            self._items.extend(other.items())
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self._items.append((k, other[k]))
        else:
            for k, v in other:
                self._items.append((k, v))
        if kwargs:
            self.update(kwargs)

    def __repr__(self):
        items = ', '.join(['(%r, %r)' % v for v in self._items])
        return '%s([%s])' % (self.__class__.__name__, items)

    def __len__(self):
        return len(self._items)

    ##
    ## All the iteration:
    ##

    def keys(self):
        return [k for k, v in self._items]

    def iterkeys(self):
        for k, v in self._items:
            yield k

    __iter__ = iterkeys

    def items(self):
        return self._items[:]

    def iteritems(self):
        return iter(self._items)

    def values(self):
        return [v for k, v in self._items]

    def itervalues(self):
        for k, v in self._items:
            yield v
+
class UnicodeMultiDict(DictMixin):
    """
    A MultiDict wrapper that decodes returned values to unicode on the
    fly. Decoding is not applied to assigned values.

    The key/value contents are assumed to be ``str``/``strs`` or
    ``str``/``FieldStorages`` (as is returned by the ``paste.request.parse_``
    functions).

    Can optionally also decode keys when the ``decode_keys`` argument is
    True.

    ``FieldStorage`` instances are cloned, and the clone's ``filename``
    variable is decoded. Its ``name`` variable is decoded when ``decode_keys``
    is enabled.
    """
    def __init__(self, multi=None, encoding=None, errors='strict',
                 decode_keys=False):
        self.multi = multi
        if encoding is None:
            encoding = sys.getdefaultencoding()
        self.encoding = encoding
        self.errors = errors
        self.decode_keys = decode_keys

    def _decode_key(self, key):
        # Keys without a .decode method (already unicode, or non-string)
        # pass through unchanged.
        if self.decode_keys:
            try:
                key = key.decode(self.encoding, self.errors)
            except AttributeError:
                pass
        return key

    def _decode_value(self, value):
        """
        Decode the specified value to unicode. Assumes value is a ``str`` or
        ``FieldStorage`` object.

        ``FieldStorage`` objects are specially handled.
        """
        if isinstance(value, cgi.FieldStorage):
            # Shallow-clone so the caller's FieldStorage is not mutated.
            value = copy.copy(value)
            if self.decode_keys:
                value.name = value.name.decode(self.encoding, self.errors)
            # NOTE(review): assumes ``filename`` is always a str here; a
            # FieldStorage without a filename would raise AttributeError.
            value.filename = value.filename.decode(self.encoding, self.errors)
        else:
            try:
                value = value.decode(self.encoding, self.errors)
            except AttributeError:
                pass
        return value

    def __getitem__(self, key):
        return self._decode_value(self.multi.__getitem__(key))

    def __setitem__(self, key, value):
        self.multi.__setitem__(key, value)

    def add(self, key, value):
        """
        Add the key and value, not overwriting any previous value.
        """
        self.multi.add(key, value)

    def getall(self, key):
        """
        Return a list of all values matching the key (may be an empty list)
        """
        return [self._decode_value(v) for v in self.multi.getall(key)]

    def getone(self, key):
        """
        Get one value matching the key, raising a KeyError if multiple
        values were found.
        """
        return self._decode_value(self.multi.getone(key))

    def mixed(self):
        """
        Returns a dictionary where the values are either single
        values, or a list of values when a key/value appears more than
        once in this dictionary. This is similar to the kind of
        dictionary often used to represent the variables in a web
        request.
        """
        unicode_mixed = {}
        for key, value in self.multi.mixed().iteritems():
            if isinstance(value, list):
                value = [self._decode_value(value) for value in value]
            else:
                value = self._decode_value(value)
            unicode_mixed[self._decode_key(key)] = value
        return unicode_mixed

    def dict_of_lists(self):
        """
        Returns a dictionary where each key is associated with a
        list of values.
        """
        unicode_dict = {}
        for key, value in self.multi.dict_of_lists().iteritems():
            value = [self._decode_value(value) for value in value]
            unicode_dict[self._decode_key(key)] = value
        return unicode_dict

    def __delitem__(self, key):
        self.multi.__delitem__(key)

    def __contains__(self, key):
        return self.multi.__contains__(key)

    has_key = __contains__

    def clear(self):
        self.multi.clear()

    def copy(self):
        # Bug fix: previously dropped ``decode_keys``, so copies silently
        # stopped decoding keys even when the original did.
        return UnicodeMultiDict(self.multi.copy(), self.encoding, self.errors,
                                self.decode_keys)

    def setdefault(self, key, default=None):
        return self._decode_value(self.multi.setdefault(key, default))

    def pop(self, key, *args):
        return self._decode_value(self.multi.pop(key, *args))

    def popitem(self):
        k, v = self.multi.popitem()
        return (self._decode_key(k), self._decode_value(v))

    def __repr__(self):
        items = ', '.join(['(%r, %r)' % v for v in self.items()])
        return '%s([%s])' % (self.__class__.__name__, items)

    def __len__(self):
        return self.multi.__len__()

    ##
    ## All the iteration:
    ##

    def keys(self):
        return [self._decode_key(k) for k in self.multi.iterkeys()]

    def iterkeys(self):
        for k in self.multi.iterkeys():
            yield self._decode_key(k)

    __iter__ = iterkeys

    def items(self):
        return [(self._decode_key(k), self._decode_value(v)) for \
                    k, v in self.multi.iteritems()]

    def iteritems(self):
        for k, v in self.multi.iteritems():
            yield (self._decode_key(k), self._decode_value(v))

    def values(self):
        return [self._decode_value(v) for v in self.multi.itervalues()]

    def itervalues(self):
        for v in self.multi.itervalues():
            yield self._decode_value(v)
+
# Doctests collected by doctest.testmod() below; the expected output is
# Python 2 formatting.
__test__ = {
    'general': """
    >>> d = MultiDict(a=1, b=2)
    >>> d['a']
    1
    >>> d.getall('c')
    []
    >>> d.add('a', 2)
    >>> d['a']
    1
    >>> d.getall('a')
    [1, 2]
    >>> d['b'] = 4
    >>> d.getall('b')
    [4]
    >>> d.keys()
    ['a', 'a', 'b']
    >>> d.items()
    [('a', 1), ('a', 2), ('b', 4)]
    >>> d.mixed()
    {'a': [1, 2], 'b': 4}
    >>> MultiDict([('a', 'b')], c=2)
    MultiDict([('a', 'b'), ('c', 2)])
    """}

if __name__ == '__main__':
    # Run the doctests above when executed as a script.
    import doctest
    doctest.testmod()
diff --git a/paste/util/quoting.py b/paste/util/quoting.py
new file mode 100644
index 0000000..6184752
--- /dev/null
+++ b/paste/util/quoting.py
@@ -0,0 +1,98 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+import cgi
+import htmlentitydefs
+import urllib
+import re
+
+__all__ = ['html_quote', 'html_unquote', 'url_quote', 'url_unquote',
+ 'strip_html']
+
+default_encoding = 'UTF-8'
+
def html_quote(v, encoding=None):
    r"""
    Quote the value (turned to a string) as HTML. This quotes <, >,
    and quotes:

    >>> html_quote(1)
    '1'
    >>> html_quote(None)
    ''
    >>> html_quote('<hey!>')
    '&lt;hey!&gt;'
    >>> html_quote(u'\u1029')
    '\xe1\x80\xa9'
    """
    encoding = encoding or default_encoding
    if v is None:
        return ''
    if isinstance(v, str):
        return cgi.escape(v, 1)
    if isinstance(v, unicode):
        return cgi.escape(v.encode(encoding), 1)
    # Anything else: coerce to unicode first, then encode and escape.
    return cgi.escape(unicode(v).encode(encoding), 1)
+
_unquote_re = re.compile(r'&([a-zA-Z]+);')
def _entity_subber(match, name2c=htmlentitydefs.name2codepoint):
    """Regex-substitution callback: map a named HTML entity to its
    character, leaving unknown entities untouched."""
    codepoint = name2c.get(match.group(1))
    if codepoint:
        return unichr(codepoint)
    return match.group(0)
+
def html_unquote(s, encoding=None):
    r"""
    Decode the value.

    >>> html_unquote('&lt;hey&nbsp;you&gt;')
    u'<hey\xa0you>'
    >>> html_unquote('')
    u''
    >>> html_unquote('&blahblah;')
    u'&blahblah;'
    >>> html_unquote('\xe1\x80\xa9')
    u'\u1029'
    """
    if isinstance(s, str):
        if not s:
            # re.sub('', '', u'') returned str (not unicode) before
            # Python 2.5.2; short-circuit to keep the return type stable.
            return u''
        s = s.decode(encoding or default_encoding)
    return _unquote_re.sub(_entity_subber, s)
+
def strip_html(s):
    """Remove markup tags from *s*, then decode HTML entities (the
    result may be unicode).  Tag stripping is naive (non-greedy regex),
    not a full HTML parse."""
    without_tags = re.sub('<.*?>', '', s)
    return html_unquote(without_tags)
+
def no_quote(s):
    """Identity "quoter": return *s* unchanged.  Useful where an API
    requires a quoting callable but no quoting is wanted."""
    return s
+
_comment_quote_re = re.compile(r'\-\s*\>')
# Control characters other than \r, \n, \t:
_bad_chars_re = re.compile('[\x00-\x08\x0b-\x0c\x0e-\x1f]')

def comment_quote(s):
    """
    Quote text so that it cannot close an HTML comment: every ``->``
    (with optional whitespace between the dash and the bracket) becomes
    ``-&gt;``.  The input is coerced to str first.
    """
    return _comment_quote_re.sub('-&gt;', str(s))
+
# Plain aliases so all quoting helpers can be imported from this module
# (Python 2 urllib API).
url_quote = urllib.quote
url_unquote = urllib.unquote

if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
diff --git a/paste/util/scgiserver.py b/paste/util/scgiserver.py
new file mode 100644
index 0000000..d20c952
--- /dev/null
+++ b/paste/util/scgiserver.py
@@ -0,0 +1,171 @@
+"""
+SCGI-->WSGI application proxy, "SWAP".
+
+(Originally written by Titus Brown.)
+
+This lets an SCGI front-end like mod_scgi be used to execute WSGI
+application objects. To use it, subclass the SWAP class like so::
+
+ class TestAppHandler(swap.SWAP):
+ def __init__(self, *args, **kwargs):
+ self.prefix = '/canal'
+ self.app_obj = TestAppClass
+ swap.SWAP.__init__(self, *args, **kwargs)
+
+where 'TestAppClass' is the application object from WSGI and '/canal'
+is the prefix for what is served by the SCGI Web-server-side process.
+
+Then execute the SCGI handler "as usual" by doing something like this::
+
+ scgi_server.SCGIServer(TestAppHandler, port=4000).serve()
+
+and point mod_scgi (or whatever your SCGI front end is) at port 4000.
+
+Kudos to the WSGI folk for writing a nice PEP & the Quixote folk for
+writing a nice extensible SCGI server for Python!
+"""
+
+import sys
+import time
+from scgi import scgi_server
+
def debug(msg):
    """Write *msg* to stderr, prefixed with a human-readable timestamp."""
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    sys.stderr.write("[%s] %s\n" % (now, msg))
+
class SWAP(scgi_server.SCGIHandler):
    """
    SCGI->WSGI application proxy: let an SCGI server execute WSGI
    application objects.

    Subclasses must set ``app_obj`` (the WSGI application callable)
    and ``prefix`` (the URL prefix served by the SCGI front end).
    """
    app_obj = None
    prefix = None

    def __init__(self, *args, **kwargs):
        # Fail fast if a subclass forgot to configure the proxy.
        assert self.app_obj, "must set app_obj"
        assert self.prefix is not None, "must set prefix"
        args = (self,) + args
        scgi_server.SCGIHandler.__init__(*args, **kwargs)

    def handle_connection(self, conn):
        """
        Handle an individual connection.

        Reads one SCGI request from ``conn``, builds a WSGI environ,
        runs the application, and writes the buffered response back.
        """
        input = conn.makefile("r")
        output = conn.makefile("w")

        # Build the WSGI environ from the SCGI request headers.
        environ = self.read_env(input)
        environ['wsgi.input'] = input
        environ['wsgi.errors'] = sys.stderr
        environ['wsgi.version'] = (1, 0)
        environ['wsgi.multithread'] = False
        environ['wsgi.multiprocess'] = True
        environ['wsgi.run_once'] = False

        # dunno how SCGI does HTTPS signalling; can't test it myself... @CTB
        if environ.get('HTTPS','off') in ('on','1'):
            environ['wsgi.url_scheme'] = 'https'
        else:
            environ['wsgi.url_scheme'] = 'http'

        ## SCGI does some weird environ manglement. We need to set
        ## SCRIPT_NAME from 'prefix' and then set PATH_INFO from
        ## REQUEST_URI.

        prefix = self.prefix
        path = environ['REQUEST_URI'][len(prefix):].split('?', 1)[0]

        environ['SCRIPT_NAME'] = prefix
        environ['PATH_INFO'] = path

        # The whole response is buffered: headers and body chunks are
        # collected first and only written after the app finishes.
        headers_set = []
        headers_sent = []
        chunks = []
        def write(data):
            chunks.append(data)

        def start_response(status, response_headers, exc_info=None):
            # PEP 333 start_response callable.
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise original exception if headers sent
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    exc_info = None # avoid dangling circular ref
            elif headers_set:
                raise AssertionError("Headers already set!")

            headers_set[:] = [status, response_headers]
            return write

        ###

        result = self.app_obj(environ, start_response)
        try:
            for data in result:
                chunks.append(data)

            # Before the first output, send the stored headers
            if not headers_set:
                # Error -- the app never called start_response
                status = '500 Server Error'
                response_headers = [('Content-type', 'text/html')]
                chunks = ["XXX start_response never called"]
            else:
                status, response_headers = headers_sent[:] = headers_set

            # SCGI/CGI-style response: a Status line, then headers.
            output.write('Status: %s\r\n' % status)
            for header in response_headers:
                output.write('%s: %s\r\n' % header)
            output.write('\r\n')

            for data in chunks:
                output.write(data)
        finally:
            # Per PEP 333, close() the iterable if it provides one.
            if hasattr(result,'close'):
                result.close()

        # SCGI backends use connection closing to signal 'fini'.
        try:
            input.close()
            output.close()
            conn.close()
        except IOError, err:
            debug("IOError while closing connection ignored: %s" % err)
+
+
def serve_application(application, prefix, port=None, host=None, max_children=None):
    """
    Serve the specified WSGI application via SCGI proxy.

    ``application``
        The WSGI application to serve.

    ``prefix``
        The prefix for what is served by the SCGI Web-server-side process.

    ``port``
        Optional port to bind the SCGI proxy to. Defaults to SCGIServer's
        default port value.

    ``host``
        Optional host to bind the SCGI proxy to. Defaults to SCGIServer's
        default host value.

    ``max_children``
        Optional maximum number of child processes the SCGIServer will
        spawn. Defaults to SCGIServer's default max_children value.
    """
    # (Fixed: the last docstring entry was mislabelled ``host``.)
    class SCGIAppHandler(SWAP):
        # Bind the prefix and application from the enclosing call.
        def __init__ (self, *args, **kwargs):
            self.prefix = prefix
            self.app_obj = application
            SWAP.__init__(self, *args, **kwargs)

    # Only pass along options that were explicitly provided, so that
    # SCGIServer's own defaults apply otherwise.
    kwargs = dict(handler_class=SCGIAppHandler)
    for name, value in (('host', host), ('port', port),
                        ('max_children', max_children)):
        if value is not None:
            kwargs[name] = value

    scgi_server.SCGIServer(**kwargs).serve()
diff --git a/paste/util/string24.py b/paste/util/string24.py
new file mode 100644
index 0000000..7c0e001
--- /dev/null
+++ b/paste/util/string24.py
@@ -0,0 +1,531 @@
+"""A collection of string operations (most are no longer used).
+
+Warning: most of the code you see here isn't normally used nowadays.
+Beginning with Python 1.6, many of these functions are implemented as
+methods on the standard string object. They used to be implemented by
+a built-in module called strop, but strop is now obsolete itself.
+
+Public module variables:
+
+whitespace -- a string containing all characters considered whitespace
+lowercase -- a string containing all characters considered lowercase letters
+uppercase -- a string containing all characters considered uppercase letters
+letters -- a string containing all characters considered letters
+digits -- a string containing all characters considered decimal digits
+hexdigits -- a string containing all characters considered hexadecimal digits
+octdigits -- a string containing all characters considered octal digits
+punctuation -- a string containing all characters considered punctuation
+printable -- a string containing all characters considered printable
+
+"""
+
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace

# Case conversion helpers
# Use str to convert Unicode literal in case of -U
# Note that Cookie.py bogusly uses _idmap :(
# _idmap is the 256-byte identity table used by maketrans() below.
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l
+
+# Functions which aren't available as string methods.
+
+# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
+# See also regsub.capwords().
def capwords(s, sep=None):
    """capwords(s, [sep]) -> string

    Split the argument into words using split, capitalize each
    word using capitalize, and join the capitalized words using
    join.  Note that this replaces runs of whitespace characters by
    a single space.

    """
    joiner = sep or ' '
    return joiner.join([word.capitalize() for word in s.split(sep)])
+
+
# Construct a translation string
_idmapL = None
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string

    Return a translation table (a string of 256 bytes long)
    suitable for use in string.translate.  The strings frm and to
    must be of the same length.

    """
    if len(fromstr) != len(tostr):
        raise ValueError, "maketrans arguments must have same length"
    global _idmapL
    if not _idmapL:
        # Lazily build and cache the identity table as a list of
        # characters; map(None, s) is the Py2 idiom for list(s).
        _idmapL = map(None, _idmap)
    L = _idmapL[:]
    fromstr = map(ord, fromstr)
    for i in range(len(fromstr)):
        # Overwrite the slot of each source character with its target.
        L[fromstr[i]] = tostr[i]
    return ''.join(L)
+
+
+
+####################################################################
+import re as _re
+
class _multimap:
    """Helper class for combining multiple mappings.

    Used by .{safe_,}substitute() to combine the mapping and keyword
    arguments.  Lookups consult the primary mapping first and fall
    back to the secondary one.
    """
    def __init__(self, primary, secondary):
        self._primary = primary
        self._secondary = secondary

    def __getitem__(self, key):
        for mapping in (self._primary, self._secondary):
            try:
                return mapping[key]
            except KeyError:
                continue
        raise KeyError(key)
+
+
class _TemplateMetaclass(type):
    # Template for the placeholder regex; filled in below with the
    # class's ``delimiter`` and ``idpattern`` attributes.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
      (?P<named>%(id)s) | # delimiter and a Python identifier
      {(?P<braced>%(id)s)} | # delimiter and a braced identifier
      (?P<invalid>) # Other ill-formed delimiter exprs
    )
    """

    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        # A class that supplies its own ``pattern`` is used verbatim;
        # otherwise one is built from delimiter/idpattern.
        if 'pattern' in dct:
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
+
+
class Template:
    """A string class for supporting $-substitutions."""
    # Python 2 metaclass hook: _TemplateMetaclass compiles ``pattern``
    # from ``delimiter`` and ``idpattern`` at class-creation time.
    __metaclass__ = _TemplateMetaclass

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'

    def __init__(self, template):
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Report the line/column of an ill-formed '$' placeholder.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(self, *args, **kws):
        """Substitute placeholders from a mapping and/or keyword
        arguments; raises KeyError for missing names and ValueError
        for ill-formed placeholders."""
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments take precedence over the mapping.
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % val
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(self, *args, **kws):
        """Like substitute(), but missing or malformed placeholders
        are left intact instead of raising."""
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % mapping[named]
                except KeyError:
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % mapping[braced]
                except KeyError:
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
+
+
+
+####################################################################
+# NOTE: Everything below here is deprecated. Use string methods instead.
+# This stuff will go away in Python 3.0.
+
# Backward compatible names for exceptions (all plain ValueError now).
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
+
+# convert UPPER CASE letters to lower case
def lower(s):
    """Return a copy of the string ``s`` converted to lower case."""
    return s.lower()
+
+# Convert lower case letters to UPPER CASE
def upper(s):
    """Return a copy of the string ``s`` converted to upper case."""
    return s.upper()
+
+# Swap lower case letters and UPPER CASE
def swapcase(s):
    """Return a copy of ``s`` with the case of every letter inverted."""
    return s.swapcase()
+
+# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
    """Return ``s`` with leading and trailing whitespace removed.

    When ``chars`` is given and not None, remove those characters
    instead.  A unicode ``chars`` causes ``s`` to be converted to
    unicode before stripping.
    """
    return s.strip(chars)
+
+# Strip leading tabs and spaces
def lstrip(s, chars=None):
    """Return ``s`` with leading whitespace removed; when ``chars`` is
    given and not None, remove those characters instead."""
    return s.lstrip(chars)
+
+# Strip trailing tabs and spaces
def rstrip(s, chars=None):
    """Return ``s`` with trailing whitespace removed; when ``chars`` is
    given and not None, remove those characters instead."""
    return s.rstrip(chars)
+
+
+# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
    """Return the list of words in ``s``, split on ``sep``.

    At most ``maxsplit`` splits are performed when it is non-negative
    (yielding at most maxsplit+1 words).  With ``sep`` omitted or
    None, any run of whitespace separates words.

    (split and splitfields are synonymous)
    """
    return s.split(sep, maxsplit)
splitfields = split
+
+# Split a string into a list of space/tab-separated words
def rsplit(s, sep=None, maxsplit=-1):
    """Return the list of words in ``s``, splitting on ``sep`` from the
    right end of the string toward the front.

    At most ``maxsplit`` splits are performed when it is non-negative.
    With ``sep`` omitted or None, any run of whitespace separates words.
    """
    return s.rsplit(sep, maxsplit)
+
+# Join fields with optional separator
def join(words, sep = ' '):
    """Concatenate ``words`` with ``sep`` between consecutive items.
    The default separator is a single space.

    (joinfields and join are synonymous)
    """
    return sep.join(words)
joinfields = join
+
+# Find substring, raise exception if not found
def index(s, *args):
    """index(s, sub [,start [,end]]) -> int

    Like find(), but raise ValueError when the substring is absent.
    """
    return s.index(*args)
+
+# Find last substring, raise exception if not found
def rindex(s, *args):
    """rindex(s, sub [,start [,end]]) -> int

    Like rfind(), but raise ValueError when the substring is absent.
    """
    return s.rindex(*args)
+
+# Count non-overlapping occurrences of substring
def count(s, *args):
    """count(s, sub[, start[,end]]) -> int

    Return the number of non-overlapping occurrences of substring
    ``sub`` in ``s[start:end]``; start and end follow slice notation.
    """
    return s.count(*args)
+
+# Find substring, return -1 if not found
def find(s, *args):
    """find(s, sub [,start [,end]]) -> int

    Return the lowest index in s where substring sub is found,
    such that sub is contained within s[start,end].  Optional
    arguments start and end are interpreted as in slice notation.

    Return -1 on failure.

    """
    # (Fixed docstring signature: it previously read "-> in".)
    return s.find(*args)
+
+# Find last substring, return -1 if not found
def rfind(s, *args):
    """rfind(s, sub [,start [,end]]) -> int

    Return the highest index in s where substring sub is found, such
    that sub is contained within s[start,end]; start and end follow
    slice notation.  Return -1 on failure.
    """
    return s.rfind(*args)
+
# for a bit of speed: cache the builtin conversion functions under
# module-private names (global lookup beats builtin lookup).
_float = float
_int = int
_long = long
+
+# Convert string to float
def atof(s):
    """Return the floating point number represented by the string s."""
    return _float(s)
+
+
+# Convert string to integer
def atoi(s, base=10):
    """Return the integer represented by string ``s`` in ``base``.

    ``s`` must be one or more digits, optionally signed.  Base 0
    infers the radix from the prefix (0 for octal, 0x/0X for hex);
    base 16 also accepts a leading 0x/0X.
    """
    return _int(s, base)
+
+
+# Convert string to long integer
def atol(s, base=10):
    """Return the long integer represented by string ``s`` in ``base``.

    ``s`` must be one or more digits, optionally signed.  Base 0
    infers the radix from the prefix (0 for octal, 0x/0X for hex);
    base 16 also accepts a leading 0x/0X.  A trailing 'L' or 'l' is
    only accepted when base is 0.
    """
    return _long(s, base)
+
+
+# Left-justify a string
def ljust(s, width, *args):
    """Return ``s`` left-justified in a field of ``width`` characters,
    padded with spaces (or the optional fillchar).  The string is
    never truncated."""
    return s.ljust(width, *args)
+
+# Right-justify a string
def rjust(s, width, *args):
    """Return ``s`` right-justified in a field of ``width`` characters,
    padded with spaces (or the optional fillchar).  The string is
    never truncated."""
    return s.rjust(width, *args)
+
+# Center a string
+def center(s, width, *args):
+ """center(s, width[, fillchar]) -> string
+
+ Return a center version of s, in a field of the specified
+ width. padded with spaces as needed. The string is never
+ truncated. If specified the fillchar is used instead of spaces.
+
+ """
+ return s.center(width, *args)
+
+# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
+# Decadent feature: the argument may be a string or a number
+# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
    """Pad the numeric string ``x`` with zeros on the left to fill a
    field of ``width`` characters; ``x`` is never truncated.

    A non-string argument is converted with repr() first (deprecated
    behaviour, kept for compatibility).
    """
    if isinstance(x, basestring):
        return x.zfill(width)
    return repr(x).zfill(width)
+
+# Expand tabs in a string.
+# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
    """Return ``s`` with each tab replaced by enough spaces to reach
    the next multiple of ``tabsize`` columns (default 8); understands
    '\\n' but not other non-printing characters."""
    return s.expandtabs(tabsize)
+
+# Character translation through look-up table.
def translate(s, table, deletions=""):
    """Map the characters of ``s`` through ``table`` (a string of
    length 256), after removing any characters found in ``deletions``.

    The deletions argument is not allowed for Unicode strings.
    """
    if deletions:
        return s.translate(table, deletions)
    # Appending s[:0] coerces an 8-bit table to Unicode when s is
    # Unicode; as a consequence table cannot be a dictionary here --
    # use u.translate() directly for that.
    return s.translate(table + s[:0])
+
+# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
    """Return ``s`` with only its first character capitalized, e.g.
    "aBc dEf" -> "Abc def"."""
    return s.capitalize()
+
+# Substring replacement (global)
def replace(s, old, new, maxsplit=-1):
    """Return ``s`` with occurrences of substring ``old`` replaced by
    ``new``.  When ``maxsplit`` is non-negative, only the first
    ``maxsplit`` occurrences are replaced."""
    return s.replace(old, new, maxsplit)
+
+
+# Try importing optional built-in module "strop" -- if it exists,
+# it redefines some string operations that are 100-1000 times faster.
+# It also defines values for whitespace, lowercase and uppercase
+# that match <ctype.h>'s definitions.
+
try:
    # strop, when available, supplies C-speed replacements; note this
    # rebinds maketrans and the character-class constants defined above.
    from strop import maketrans, lowercase, uppercase, whitespace
    letters = lowercase + uppercase
except ImportError:
    pass # Use the original versions
diff --git a/paste/util/subprocess24.py b/paste/util/subprocess24.py
new file mode 100644
index 0000000..57ec119
--- /dev/null
+++ b/paste/util/subprocess24.py
@@ -0,0 +1,1152 @@
+# subprocess - Subprocesses with accessible I/O streams
+#
+# For more information about this module, see PEP 324.
+#
+# This module should remain compatible with Python 2.2, see PEP 291.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+#
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
+
+r"""subprocess - Subprocesses with accessible I/O streams
+
+This module allows you to spawn processes, connect to their
+input/output/error pipes, and obtain their return codes. This module
+intends to replace several other, older modules and functions, like:
+
+os.system
+os.spawn*
+os.popen*
+popen2.*
+commands.*
+
+Information about how the subprocess module can be used to replace these
+modules and functions can be found below.
+
+
+
+Using the subprocess module
+===========================
+This module defines one class called Popen:
+
+class Popen(args, bufsize=0, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=False, shell=False,
+ cwd=None, env=None, universal_newlines=False,
+ startupinfo=None, creationflags=0):
+
+
+Arguments are:
+
+args should be a string, or a sequence of program arguments. The
+program to execute is normally the first item in the args sequence or
+string, but can be explicitly set by using the executable argument.
+
+On UNIX, with shell=False (default): In this case, the Popen class
+uses os.execvp() to execute the child program. args should normally
+be a sequence. A string will be treated as a sequence with the string
+as the only item (the program to execute).
+
+On UNIX, with shell=True: If args is a string, it specifies the
+command string to execute through the shell. If args is a sequence,
+the first item specifies the command string, and any additional items
+will be treated as additional shell arguments.
+
+On Windows: the Popen class uses CreateProcess() to execute the child
+program, which operates on strings. If args is a sequence, it will be
+converted to a string using the list2cmdline method. Please note that
+not all MS Windows applications interpret the command line the same
+way: The list2cmdline is designed for applications using the same
+rules as the MS C runtime.
+
+bufsize, if given, has the same meaning as the corresponding argument
+to the built-in open() function: 0 means unbuffered, 1 means line
+buffered, any other positive value means use a buffer of
+(approximately) that size. A negative bufsize means to use the system
+default, which usually means fully buffered. The default value for
+bufsize is 0 (unbuffered).
+
+stdin, stdout and stderr specify the executed programs' standard
+input, standard output and standard error file handles, respectively.
+Valid values are PIPE, an existing file descriptor (a positive
+integer), an existing file object, and None. PIPE indicates that a
+new pipe to the child should be created. With None, no redirection
+will occur; the child's file handles will be inherited from the
+parent. Additionally, stderr can be STDOUT, which indicates that the
+stderr data from the applications should be captured into the same
+file handle as for stdout.
+
+If preexec_fn is set to a callable object, this object will be called
+in the child process just before the child is executed.
+
+If close_fds is true, all file descriptors except 0, 1 and 2 will be
+closed before the child process is executed.
+
+if shell is true, the specified command will be executed through the
+shell.
+
+If cwd is not None, the current directory will be changed to cwd
+before the child is executed.
+
+If env is not None, it defines the environment variables for the new
+process.
+
+If universal_newlines is true, the file objects stdout and stderr are
+opened as text files, but lines may be terminated by any of '\n',
+the Unix end-of-line convention, '\r', the Macintosh convention or
+'\r\n', the Windows convention. All of these external representations
+are seen as '\n' by the Python program. Note: This feature is only
+available if Python is built with universal newline support (the
+default). Also, the newlines attribute of the file objects stdout,
+stdin and stderr are not updated by the communicate() method.
+
+The startupinfo and creationflags, if given, will be passed to the
+underlying CreateProcess() function. They can specify things such as
+appearance of the main window and priority for the new process.
+(Windows only)
+
+
+This module also defines two shortcut functions:
+
+call(*args, **kwargs):
+ Run command with arguments. Wait for command to complete, then
+ return the returncode attribute. The arguments are the same as for
+ the Popen constructor. Example:
+
+ retcode = call(["ls", "-l"])
+
+
+Exceptions
+----------
+Exceptions raised in the child process, before the new program has
+started to execute, will be re-raised in the parent. Additionally,
+the exception object will have one extra attribute called
+'child_traceback', which is a string containing traceback information
+from the child's point of view.
+
+The most common exception raised is OSError. This occurs, for
+example, when trying to execute a non-existent file. Applications
+should prepare for OSErrors.
+
+A ValueError will be raised if Popen is called with invalid arguments.
+
+
+Security
+--------
+Unlike some other popen functions, this implementation will never call
+/bin/sh implicitly. This means that all characters, including shell
+metacharacters, can safely be passed to child processes.
+
+
+Popen objects
+=============
+Instances of the Popen class have the following methods:
+
+poll()
+ Check if child process has terminated. Returns returncode
+ attribute.
+
+wait()
+ Wait for child process to terminate. Returns returncode attribute.
+
+communicate(input=None)
+ Interact with process: Send data to stdin. Read data from stdout
+ and stderr, until end-of-file is reached. Wait for process to
+ terminate. The optional stdin argument should be a string to be
+ sent to the child process, or None, if no data should be sent to
+ the child.
+
+ communicate() returns a tuple (stdout, stderr).
+
+ Note: The data read is buffered in memory, so do not use this
+ method if the data size is large or unlimited.
+
+The following attributes are also available:
+
+stdin
+ If the stdin argument is PIPE, this attribute is a file object
+ that provides input to the child process. Otherwise, it is None.
+
+stdout
+ If the stdout argument is PIPE, this attribute is a file object
+ that provides output from the child process. Otherwise, it is
+ None.
+
+stderr
+ If the stderr argument is PIPE, this attribute is file object that
+ provides error output from the child process. Otherwise, it is
+ None.
+
+pid
+ The process ID of the child process.
+
+returncode
+ The child return code. A None value indicates that the process
+ hasn't terminated yet. A negative value -N indicates that the
+ child was terminated by signal N (UNIX only).
+
+
+Replacing older functions with the subprocess module
+====================================================
+In this section, "a ==> b" means that b can be used as a replacement
+for a.
+
+Note: All functions in this section fail (more or less) silently if
+the executed program cannot be found; this module raises an OSError
+exception.
+
+In the following examples, we assume that the subprocess module is
+imported with "from subprocess import *".
+
+
+Replacing /bin/sh shell backquote
+---------------------------------
+output=`mycmd myarg`
+==>
+output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
+
+
+Replacing shell pipe line
+-------------------------
+output=`dmesg | grep hda`
+==>
+p1 = Popen(["dmesg"], stdout=PIPE)
+p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
+output = p2.communicate()[0]
+
+
+Replacing os.system()
+---------------------
+sts = os.system("mycmd" + " myarg")
+==>
+p = Popen("mycmd" + " myarg", shell=True)
+sts = os.waitpid(p.pid, 0)
+
+Note:
+
+* Calling the program through the shell is usually not required.
+
+* It's easier to look at the returncode attribute than the
+ exitstatus.
+
+A more real-world example would look like this:
+
+try:
+ retcode = call("mycmd" + " myarg", shell=True)
+ if retcode < 0:
+ print >>sys.stderr, "Child was terminated by signal", -retcode
+ else:
+ print >>sys.stderr, "Child returned", retcode
+except OSError, e:
+ print >>sys.stderr, "Execution failed:", e
+
+
+Replacing os.spawn*
+-------------------
+P_NOWAIT example:
+
+pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
+==>
+pid = Popen(["/bin/mycmd", "myarg"]).pid
+
+
+P_WAIT example:
+
+retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
+==>
+retcode = call(["/bin/mycmd", "myarg"])
+
+
+Vector example:
+
+os.spawnvp(os.P_NOWAIT, path, args)
+==>
+Popen([path] + args[1:])
+
+
+Environment example:
+
+os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
+==>
+Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
+
+
+Replacing os.popen*
+-------------------
+pipe = os.popen(cmd, mode='r', bufsize)
+==>
+pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
+
+pipe = os.popen(cmd, mode='w', bufsize)
+==>
+pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
+
+
+(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdin, child_stdout) = (p.stdin, p.stdout)
+
+
+(child_stdin,
+ child_stdout,
+ child_stderr) = os.popen3(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
+(child_stdin,
+ child_stdout,
+ child_stderr) = (p.stdin, p.stdout, p.stderr)
+
+
+(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
+(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
+
+
+Replacing popen2.*
+------------------
+Note: If the cmd argument to popen2 functions is a string, the command
+is executed through /bin/sh. If it is a list, the command is directly
+executed.
+
+(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
+==>
+p = Popen(["somestring"], shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdout, child_stdin) = (p.stdout, p.stdin)
+
+
+(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
+==>
+p = Popen(["mycmd", "myarg"], bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdout, child_stdin) = (p.stdout, p.stdin)
+
+The popen2.Popen3 and popen2.Popen4 basically work as subprocess.Popen,
+except that:
+
+* subprocess.Popen raises an exception if the execution fails
+* the capturestderr argument is replaced with the stderr argument.
+* stdin=PIPE and stdout=PIPE must be specified.
+* popen2 closes all filedescriptors by default, but you have to specify
+ close_fds=True with subprocess.Popen.
+
+
+"""
+
+import sys
+mswindows = (sys.platform == "win32")
+
+import os
+import types
+import traceback
+
+if mswindows:
+ import threading
+ import msvcrt
+ ## @@: Changed in Paste
+ ## Since this module is only used on pre-python-2.4 systems, they probably
+ ## don't have _subprocess installed, but hopefully have the win32 stuff
+ ## installed.
+ if 1: # <-- change this to use pywin32 instead of the _subprocess driver
+ import pywintypes
+ from win32api import GetStdHandle, STD_INPUT_HANDLE, \
+ STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
+ from win32api import GetCurrentProcess, DuplicateHandle, \
+ GetModuleFileName, GetVersion
+ from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
+ from win32pipe import CreatePipe
+ from win32process import CreateProcess, STARTUPINFO, \
+ GetExitCodeProcess, STARTF_USESTDHANDLES, \
+ STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
+ from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
+ else:
+ from _subprocess import *
+ class STARTUPINFO:
+ dwFlags = 0
+ hStdInput = None
+ hStdOutput = None
+ hStdError = None
+ class pywintypes:
+ error = IOError
+else:
+ import select
+ import errno
+ import fcntl
+ import pickle
+
+# Public API of this subprocess backport.
+__all__ = ["Popen", "PIPE", "STDOUT", "call"]
+
+# Upper bound on file descriptors to close in the child (_close_fds);
+# falls back to 256 where os.sysconf lacks SC_OPEN_MAX.
+try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+except:
+ MAXFD = 256
+
+# True/False does not exist on 2.2.0
+try:
+ False
+except NameError:
+ False = 0
+ True = 1
+
+# Popen instances that have not yet been reaped; see _cleanup().
+_active = []
+
+def _cleanup():
+ # Poll every outstanding child so finished processes get reaped
+ # (poll() removes an instance from _active once it has exited).
+ for inst in _active[:]:
+ inst.poll()
+
+# Sentinel values for the stdin/stdout/stderr arguments: PIPE requests a
+# new pipe to the child; STDOUT (valid for stderr only) redirects the
+# child's stderr into its stdout stream.
+PIPE = -1
+STDOUT = -2
+
+
+def call(*args, **kwargs):
+ """Run command with arguments. Wait for command to complete, then
+ return the returncode attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ retcode = call(["ls", "-l"])
+ """
+ # Convenience wrapper: spawn, then block until the child exits.
+ return Popen(*args, **kwargs).wait()
+
+
+def list2cmdline(seq):
+ """
+ Translate a sequence of arguments into a command line
+ string, using the same rules as the MS C runtime:
+
+ 1) Arguments are delimited by white space, which is either a
+ space or a tab.
+
+ 2) A string surrounded by double quotation marks is
+ interpreted as a single argument, regardless of white space
+ contained within. A quoted string can be embedded in an
+ argument.
+
+ 3) A double quotation mark preceded by a backslash is
+ interpreted as a literal double quotation mark.
+
+ 4) Backslashes are interpreted literally, unless they
+ immediately precede a double quotation mark.
+
+ 5) If backslashes immediately precede a double quotation mark,
+ every pair of backslashes is interpreted as a literal
+ backslash. If the number of backslashes is odd, the last
+ backslash escapes the next double quotation mark as
+ described in rule 3.
+ """
+
+ # See
+ # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
+ result = []
+ needquote = False
+ for arg in seq:
+ # Pending run of backslashes; their meaning depends on whether a
+ # double quote follows (rules 4 and 5 above).
+ bs_buf = []
+
+ # Add a space to separate this argument from the others
+ if result:
+ result.append(' ')
+
+ needquote = (" " in arg) or ("\t" in arg)
+ if needquote:
+ result.append('"')
+
+ for c in arg:
+ if c == '\\':
+ # Don't know if we need to double yet.
+ bs_buf.append(c)
+ elif c == '"':
+ # Double backslashes.
+ result.append('\\' * len(bs_buf)*2)
+ bs_buf = []
+ result.append('\\"')
+ else:
+ # Normal char
+ if bs_buf:
+ result.extend(bs_buf)
+ bs_buf = []
+ result.append(c)
+
+ # Add remaining backslashes, if any.
+ if bs_buf:
+ result.extend(bs_buf)
+
+ if needquote:
+ # Trailing backslashes are emitted again here so they end up
+ # doubled before the closing quote (bs_buf is not cleared above).
+ result.extend(bs_buf)
+ result.append('"')
+
+ return ''.join(result)
+
+
+class Popen(object):
+ # Backport of Python 2.4's subprocess.Popen for older interpreters;
+ # platform-specific methods are selected by the mswindows flag below.
+ def __init__(self, args, bufsize=0, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=False, shell=False,
+ cwd=None, env=None, universal_newlines=False,
+ startupinfo=None, creationflags=0):
+ """Create new Popen instance."""
+ # Reap any already-finished children before starting a new one.
+ _cleanup()
+
+ if not isinstance(bufsize, (int, long)):
+ raise TypeError("bufsize must be an integer")
+
+ # Reject options that only exist on the other platform.
+ if mswindows:
+ if preexec_fn is not None:
+ raise ValueError("preexec_fn is not supported on Windows "
+ "platforms")
+ if close_fds:
+ raise ValueError("close_fds is not supported on Windows "
+ "platforms")
+ else:
+ # POSIX
+ if startupinfo is not None:
+ raise ValueError("startupinfo is only supported on Windows "
+ "platforms")
+ if creationflags != 0:
+ raise ValueError("creationflags is only supported on Windows "
+ "platforms")
+
+ self.stdin = None
+ self.stdout = None
+ self.stderr = None
+ self.pid = None
+ self.returncode = None
+ self.universal_newlines = universal_newlines
+
+ # Input and output objects. The general principle is like
+ # this:
+ #
+ # Parent Child
+ # ------ -----
+ # p2cwrite ---stdin---> p2cread
+ # c2pread <--stdout--- c2pwrite
+ # errread <--stderr--- errwrite
+ #
+ # On POSIX, the child objects are file descriptors. On
+ # Windows, these are Windows file handles. The parent objects
+ # are file descriptors on both platforms. The parent objects
+ # are None when not using PIPEs. The child objects are None
+ # when not redirecting.
+
+ (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite) = self._get_handles(stdin, stdout, stderr)
+
+ self._execute_child(args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+ # Wrap the parent pipe ends in file objects; 'U' mode gives
+ # newline translation when universal_newlines was requested.
+ if p2cwrite:
+ self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
+ if c2pread:
+ if universal_newlines:
+ self.stdout = os.fdopen(c2pread, 'rU', bufsize)
+ else:
+ self.stdout = os.fdopen(c2pread, 'rb', bufsize)
+ if errread:
+ if universal_newlines:
+ self.stderr = os.fdopen(errread, 'rU', bufsize)
+ else:
+ self.stderr = os.fdopen(errread, 'rb', bufsize)
+
+ # Track for later reaping by _cleanup().
+ _active.append(self)
+
+
+ def _translate_newlines(self, data):
+ # Normalize \r\n and lone \r to \n (universal-newlines emulation
+ # for data read via communicate()).
+ data = data.replace("\r\n", "\n")
+ data = data.replace("\r", "\n")
+ return data
+
+
+ if mswindows:
+ #
+ # Windows methods
+ #
+ def _get_handles(self, stdin, stdout, stderr):
+ """Construct and return tuple with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ # Nothing redirected: the child inherits all standard handles.
+ if stdin == None and stdout == None and stderr == None:
+ return (None, None, None, None, None, None)
+
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ # For PIPE, the parent's end is detached from the Windows handle
+ # and converted into a C-runtime fd so os.fdopen() can wrap it.
+ if stdin == None:
+ p2cread = GetStdHandle(STD_INPUT_HANDLE)
+ elif stdin == PIPE:
+ p2cread, p2cwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ p2cwrite = p2cwrite.Detach()
+ p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
+ elif type(stdin) == types.IntType:
+ p2cread = msvcrt.get_osfhandle(stdin)
+ else:
+ # Assuming file-like object
+ p2cread = msvcrt.get_osfhandle(stdin.fileno())
+ p2cread = self._make_inheritable(p2cread)
+
+ if stdout == None:
+ c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
+ elif stdout == PIPE:
+ c2pread, c2pwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ c2pread = c2pread.Detach()
+ c2pread = msvcrt.open_osfhandle(c2pread, 0)
+ elif type(stdout) == types.IntType:
+ c2pwrite = msvcrt.get_osfhandle(stdout)
+ else:
+ # Assuming file-like object
+ c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
+ c2pwrite = self._make_inheritable(c2pwrite)
+
+ if stderr == None:
+ errwrite = GetStdHandle(STD_ERROR_HANDLE)
+ elif stderr == PIPE:
+ errread, errwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ errread = errread.Detach()
+ errread = msvcrt.open_osfhandle(errread, 0)
+ elif stderr == STDOUT:
+ # Share the child's stdout handle.
+ errwrite = c2pwrite
+ elif type(stderr) == types.IntType:
+ errwrite = msvcrt.get_osfhandle(stderr)
+ else:
+ # Assuming file-like object
+ errwrite = msvcrt.get_osfhandle(stderr.fileno())
+ errwrite = self._make_inheritable(errwrite)
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _make_inheritable(self, handle):
+ """Return a duplicate of handle, which is inheritable"""
+ # Pipe handles are created non-inheritable; the child process
+ # needs an inheritable copy to receive it via CreateProcess.
+ return DuplicateHandle(GetCurrentProcess(), handle,
+ GetCurrentProcess(), 0, 1,
+ DUPLICATE_SAME_ACCESS)
+
+
+ def _find_w9xpopen(self):
+ """Find and return absolute path to w9xpopen.exe"""
+ # First look next to the Python DLL/EXE itself.
+ w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
+ "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ # Eeek - file-not-found - possibly an embedding
+ # situation - see if we can locate it in sys.exec_prefix
+ w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
+ "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ raise RuntimeError("Cannot locate w9xpopen.exe, which is "
+ "needed for Popen to work with your "
+ "shell or platform.")
+ return w9xpopen
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ """Execute program (MS Windows version)"""
+
+ # CreateProcess takes a single command-line string, not an argv
+ # list, so join list arguments using MS quoting rules.
+ if not isinstance(args, types.StringTypes):
+ args = list2cmdline(args)
+
+ # Process startup details
+ default_startupinfo = STARTUPINFO()
+ if startupinfo == None:
+ startupinfo = default_startupinfo
+ if not None in (p2cread, c2pwrite, errwrite):
+ startupinfo.dwFlags |= STARTF_USESTDHANDLES
+ startupinfo.hStdInput = p2cread
+ startupinfo.hStdOutput = c2pwrite
+ startupinfo.hStdError = errwrite
+
+ if shell:
+ # Run through the command interpreter, with a hidden window.
+ default_startupinfo.dwFlags |= STARTF_USESHOWWINDOW
+ default_startupinfo.wShowWindow = SW_HIDE
+ comspec = os.environ.get("COMSPEC", "cmd.exe")
+ args = comspec + " /c " + args
+ if (GetVersion() >= 0x80000000L or
+ os.path.basename(comspec).lower() == "command.com"):
+ # Win9x, or using command.com on NT. We need to
+ # use the w9xpopen intermediate program. For more
+ # information, see KB Q150956
+ # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
+ w9xpopen = self._find_w9xpopen()
+ args = '"%s" %s' % (w9xpopen, args)
+ # Not passing CREATE_NEW_CONSOLE has been known to
+ # cause random failures on win9x. Specifically a
+ # dialog: "Your program accessed mem currently in
+ # use at xxx" and a hopeful warning about the
+ # stability of your system. Cost is Ctrl+C wont
+ # kill children.
+ creationflags |= CREATE_NEW_CONSOLE
+
+ # Start the process
+ try:
+ hp, ht, pid, tid = CreateProcess(executable, args,
+ # no special security
+ None, None,
+ # must inherit handles to pass std
+ # handles
+ 1,
+ creationflags,
+ env,
+ cwd,
+ startupinfo)
+ except pywintypes.error, e:
+ # Translate pywintypes.error to WindowsError, which is
+ # a subclass of OSError. FIXME: We should really
+ # translate errno using _sys_errlist (or similar), but
+ # how can this be done from Python?
+ raise WindowsError(*e.args)
+
+ # Retain the process handle, but close the thread handle
+ self._handle = hp
+ self.pid = pid
+ ht.Close()
+
+ # Child is launched. Close the parent's copy of those pipe
+ # handles that only the child should have open. You need
+ # to make sure that no handles to the write end of the
+ # output pipe are maintained in this process or else the
+ # pipe will not close when the child process exits and the
+ # ReadFile will hang.
+ if p2cread != None:
+ p2cread.Close()
+ if c2pwrite != None:
+ c2pwrite.Close()
+ if errwrite != None:
+ errwrite.Close()
+
+
+ def poll(self):
+ """Check if child process has terminated. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ # Zero timeout: probe the process handle without blocking.
+ if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
+ self.returncode = GetExitCodeProcess(self._handle)
+ _active.remove(self)
+ return self.returncode
+
+
+ def wait(self):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ # Block until the process handle is signalled.
+ obj = WaitForSingleObject(self._handle, INFINITE)
+ self.returncode = GetExitCodeProcess(self._handle)
+ _active.remove(self)
+ return self.returncode
+
+
+ def _readerthread(self, fh, buffer):
+ # communicate() helper: drain fh to EOF into the shared list.
+ buffer.append(fh.read())
+
+
+ def communicate(self, input=None):
+ """Interact with process: Send data to stdin. Read data from
+ stdout and stderr, until end-of-file is reached. Wait for
+ process to terminate. The optional input argument should be a
+ string to be sent to the child process, or None, if no data
+ should be sent to the child.
+
+ communicate() returns a tuple (stdout, stderr)."""
+ stdout = None # Return
+ stderr = None # Return
+
+ # One reader thread per output pipe avoids deadlock when the
+ # child fills one pipe while we are blocked on the other.
+ if self.stdout:
+ stdout = []
+ stdout_thread = threading.Thread(target=self._readerthread,
+ args=(self.stdout, stdout))
+ stdout_thread.setDaemon(True)
+ stdout_thread.start()
+ if self.stderr:
+ stderr = []
+ stderr_thread = threading.Thread(target=self._readerthread,
+ args=(self.stderr, stderr))
+ stderr_thread.setDaemon(True)
+ stderr_thread.start()
+
+ # Feed the child and close stdin so it sees EOF.
+ if self.stdin:
+ if input != None:
+ self.stdin.write(input)
+ self.stdin.close()
+
+ if self.stdout:
+ stdout_thread.join()
+ if self.stderr:
+ stderr_thread.join()
+
+ # All data exchanged. Translate lists into strings.
+ if stdout != None:
+ stdout = stdout[0]
+ if stderr != None:
+ stderr = stderr[0]
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(open, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
+
+ else:
+ #
+ # POSIX methods
+ #
+ def _get_handles(self, stdin, stdout, stderr):
+ """Construct and return tuple with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ # Integers are taken as raw file descriptors; anything else
+ # non-None must be a file-like object with fileno().
+ if stdin == None:
+ pass
+ elif stdin == PIPE:
+ p2cread, p2cwrite = os.pipe()
+ elif type(stdin) == types.IntType:
+ p2cread = stdin
+ else:
+ # Assuming file-like object
+ p2cread = stdin.fileno()
+
+ if stdout == None:
+ pass
+ elif stdout == PIPE:
+ c2pread, c2pwrite = os.pipe()
+ elif type(stdout) == types.IntType:
+ c2pwrite = stdout
+ else:
+ # Assuming file-like object
+ c2pwrite = stdout.fileno()
+
+ if stderr == None:
+ pass
+ elif stderr == PIPE:
+ errread, errwrite = os.pipe()
+ elif stderr == STDOUT:
+ # Send the child's stderr to the same fd as its stdout.
+ errwrite = c2pwrite
+ elif type(stderr) == types.IntType:
+ errwrite = stderr
+ else:
+ # Assuming file-like object
+ errwrite = stderr.fileno()
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _set_cloexec_flag(self, fd):
+ # Mark fd close-on-exec so it disappears automatically in the
+ # child once exec succeeds (used for the exec error pipe).
+ try:
+ cloexec_flag = fcntl.FD_CLOEXEC
+ except AttributeError:
+ # Fallback where the constant is missing; FD_CLOEXEC is 1
+ # on the platforms this backport targets.
+ cloexec_flag = 1
+
+ old = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
+
+
+ def _close_fds(self, but):
+ # Close every fd above stderr except 'but' (the exec error
+ # pipe); errors for fds that are not open are ignored.
+ for i in range(3, MAXFD):
+ if i == but:
+ continue
+ try:
+ os.close(i)
+ except:
+ pass
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ """Execute program (POSIX version)"""
+
+ if isinstance(args, types.StringTypes):
+ args = [args]
+
+ if shell:
+ args = ["/bin/sh", "-c"] + args
+
+ if executable == None:
+ executable = args[0]
+
+ # For transferring possible exec failure from child to parent
+ # The first char specifies the exception type: 0 means
+ # OSError, 1 means some other error.
+ errpipe_read, errpipe_write = os.pipe()
+ # close-on-exec: the write end vanishes when exec succeeds,
+ # which the parent observes as an empty read below.
+ self._set_cloexec_flag(errpipe_write)
+
+ self.pid = os.fork()
+ if self.pid == 0:
+ # Child
+ try:
+ # Close parent's pipe ends
+ if p2cwrite:
+ os.close(p2cwrite)
+ if c2pread:
+ os.close(c2pread)
+ if errread:
+ os.close(errread)
+ os.close(errpipe_read)
+
+ # Dup fds for child
+ if p2cread:
+ os.dup2(p2cread, 0)
+ if c2pwrite:
+ os.dup2(c2pwrite, 1)
+ if errwrite:
+ os.dup2(errwrite, 2)
+
+ # Close pipe fds. Make sure we don't close the same
+ # fd more than once.
+ if p2cread:
+ os.close(p2cread)
+ if c2pwrite and c2pwrite not in (p2cread,):
+ os.close(c2pwrite)
+ if errwrite and errwrite not in (p2cread, c2pwrite):
+ os.close(errwrite)
+
+ # Close all other fds, if asked for
+ if close_fds:
+ self._close_fds(but=errpipe_write)
+
+ if cwd != None:
+ os.chdir(cwd)
+
+ if preexec_fn:
+ apply(preexec_fn)
+
+ if env == None:
+ os.execvp(executable, args)
+ else:
+ os.execvpe(executable, args, env)
+
+ except:
+ exc_type, exc_value, tb = sys.exc_info()
+ # Save the traceback and attach it to the exception object
+ exc_lines = traceback.format_exception(exc_type,
+ exc_value,
+ tb)
+ exc_value.child_traceback = ''.join(exc_lines)
+ os.write(errpipe_write, pickle.dumps(exc_value))
+
+ # This exitcode won't be reported to applications, so it
+ # really doesn't matter what we return.
+ os._exit(255)
+
+ # Parent
+ os.close(errpipe_write)
+ # Close the child's ends of the pipes in the parent.
+ if p2cread and p2cwrite:
+ os.close(p2cread)
+ if c2pwrite and c2pread:
+ os.close(c2pwrite)
+ if errwrite and errread:
+ os.close(errwrite)
+
+ # Wait for exec to fail or succeed; possibly raising exception
+ data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
+ os.close(errpipe_read)
+ if data != "":
+ # The child wrote a pickled exception before exec: reap it
+ # and re-raise the failure in the parent. (The pickle came
+ # from our own forked child, not untrusted input.)
+ os.waitpid(self.pid, 0)
+ child_exception = pickle.loads(data)
+ raise child_exception
+
+
+ def _handle_exitstatus(self, sts):
+ # Decode a waitpid() status word: a negative returncode means
+ # the child was killed by that signal number.
+ if os.WIFSIGNALED(sts):
+ self.returncode = -os.WTERMSIG(sts)
+ elif os.WIFEXITED(sts):
+ self.returncode = os.WEXITSTATUS(sts)
+ else:
+ # Should never happen
+ raise RuntimeError("Unknown child exit status!")
+
+ # Reaped: stop tracking this instance in _active.
+ _active.remove(self)
+
+
+ def poll(self):
+ """Check if child process has terminated. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ try:
+ # WNOHANG: waitpid returns (0, 0) while still running.
+ pid, sts = os.waitpid(self.pid, os.WNOHANG)
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ except os.error:
+ # Best-effort poll: ignore wait errors (e.g. if the
+ # child was already reaped elsewhere).
+ pass
+ return self.returncode
+
+
+ def wait(self):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode == None:
+ # Block until the child exits, then record its status.
+ pid, sts = os.waitpid(self.pid, 0)
+ self._handle_exitstatus(sts)
+ return self.returncode
+
+
+ def communicate(self, input=None):
+ """Interact with process: Send data to stdin. Read data from
+ stdout and stderr, until end-of-file is reached. Wait for
+ process to terminate. The optional input argument should be a
+ string to be sent to the child process, or None, if no data
+ should be sent to the child.
+
+ communicate() returns a tuple (stdout, stderr)."""
+ read_set = []
+ write_set = []
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdin:
+ # Flush stdio buffer. This might block, if the user has
+ # been writing to .stdin in an uncontrolled fashion.
+ self.stdin.flush()
+ if input:
+ write_set.append(self.stdin)
+ else:
+ self.stdin.close()
+ if self.stdout:
+ read_set.append(self.stdout)
+ stdout = []
+ if self.stderr:
+ read_set.append(self.stderr)
+ stderr = []
+
+ # Multiplex all pipes with select() so neither side can deadlock.
+ while read_set or write_set:
+ rlist, wlist, xlist = select.select(read_set, write_set, [])
+
+ if self.stdin in wlist:
+ # When select has indicated that the file is writable,
+ # we can write up to PIPE_BUF bytes without risk
+ # blocking. POSIX defines PIPE_BUF >= 512
+ bytes_written = os.write(self.stdin.fileno(), input[:512])
+ input = input[bytes_written:]
+ if not input:
+ self.stdin.close()
+ write_set.remove(self.stdin)
+
+ if self.stdout in rlist:
+ data = os.read(self.stdout.fileno(), 1024)
+ # Empty read means the child closed its end (EOF).
+ if data == "":
+ self.stdout.close()
+ read_set.remove(self.stdout)
+ stdout.append(data)
+
+ if self.stderr in rlist:
+ data = os.read(self.stderr.fileno(), 1024)
+ if data == "":
+ self.stderr.close()
+ read_set.remove(self.stderr)
+ stderr.append(data)
+
+ # All data exchanged. Translate lists into strings.
+ if stdout != None:
+ stdout = ''.join(stdout)
+ if stderr != None:
+ stderr = ''.join(stderr)
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(open, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
+
+
+def _demo_posix():
+ # Manual smoke tests, run when the module is executed directly
+ # on a POSIX system (see the __main__ guard below).
+ #
+ # Example 1: Simple redirection: Get process list
+ #
+ plist = Popen(["ps"], stdout=PIPE).communicate()[0]
+ print "Process list:"
+ print plist
+
+ #
+ # Example 2: Change uid before executing child
+ #
+ if os.getuid() == 0:
+ p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
+ p.wait()
+
+ #
+ # Example 3: Connecting several subprocesses
+ #
+ print "Looking for 'hda'..."
+ p1 = Popen(["dmesg"], stdout=PIPE)
+ p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
+ print repr(p2.communicate()[0])
+
+ #
+ # Example 4: Catch execution error
+ #
+ print
+ print "Trying a weird file..."
+ try:
+ print Popen(["/this/path/does/not/exist"]).communicate()
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ print "The file didn't exist. I thought so..."
+ print "Child traceback:"
+ print e.child_traceback
+ else:
+ print "Error", e.errno
+ else:
+ print >>sys.stderr, "Gosh. No error."
+
+
+def _demo_windows():
+ # Manual smoke tests, run when the module is executed directly
+ # on Windows (see the __main__ guard below).
+ #
+ # Example 1: Connecting several subprocesses
+ #
+ print "Looking for 'PROMPT' in set output..."
+ p1 = Popen("set", stdout=PIPE, shell=True)
+ p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
+ print repr(p2.communicate()[0])
+
+ #
+ # Example 2: Simple execution of program
+ #
+ print "Executing calc..."
+ p = Popen("calc")
+ p.wait()
+
+
+if __name__ == "__main__":
+ # Run the platform-appropriate demo suite.
+ if mswindows:
+ _demo_windows()
+ else:
+ _demo_posix()
diff --git a/paste/util/template.py b/paste/util/template.py
new file mode 100644
index 0000000..1e42b5a
--- /dev/null
+++ b/paste/util/template.py
@@ -0,0 +1,758 @@
+"""
+A small templating language
+
+This implements a small templating language for use internally in
+Paste and Paste Script. This language implements if/elif/else,
+for/continue/break, expressions, and blocks of Python code. The
+syntax is::
+
+ {{any expression (function calls etc)}}
+ {{any expression | filter}}
+ {{for x in y}}...{{endfor}}
+ {{if x}}x{{elif y}}y{{else}}z{{endif}}
+ {{py:x=1}}
+ {{py:
+ def foo(bar):
+ return 'baz'
+ }}
+ {{default var = default_value}}
+ {{# comment}}
+
+You use this with the ``Template`` class or the ``sub`` shortcut.
+The ``Template`` class takes the template string and the name of
+the template (for errors) and a default namespace. Then (like
+``string.Template``) you can call the ``tmpl.substitute(**kw)``
+method to make a substitution (or ``tmpl.substitute(a_dict)``).
+
+``sub(content, **kw)`` substitutes the template immediately. You
+can use ``__name='tmpl.html'`` to set the name of the template.
+
+If there are syntax errors ``TemplateError`` will be raised.
+"""
+
+import re
+import sys
+import cgi
+import urllib
+from paste.util.looper import looper
+
+__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
+ 'sub_html', 'html', 'bunch']
+
+# {{ or }} -- the only tokens the lexer cares about.
+token_re = re.compile(r'\{\{|\}\}')
+in_re = re.compile(r'\s+in\s+')
+# A bare Python identifier (case-insensitive).
+var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
+
+class TemplateError(Exception):
+ """Exception raised while parsing a template
+ """
+
+ # message: human-readable description of the problem
+ # position: (line, column) tuple, 1-based (see find_position)
+ # name: optional template/file name used for context in __str__
+ def __init__(self, message, position, name=None):
+ self.message = message
+ self.position = position
+ self.name = name
+
+ def __str__(self):
+ msg = '%s at line %s column %s' % (
+ self.message, self.position[0], self.position[1])
+ if self.name:
+ msg += ' in %s' % self.name
+ return msg
+
+class _TemplateContinue(Exception):
+ # Raised by a {{continue}} directive; caught in _interpret_for.
+ pass
+
+class _TemplateBreak(Exception):
+ # Raised by a {{break}} directive; caught in _interpret_for.
+ pass
+
+class Template(object):
+
+ # Names available to every template; merged (lowest priority first)
+ # with the constructor namespace and substitute() arguments.
+ default_namespace = {
+ 'start_braces': '{{',
+ 'end_braces': '}}',
+ 'looper': looper,
+ }
+
+ # Encoding used by _repr() to recode str<->unicode values.
+ default_encoding = 'utf8'
+
+ def __init__(self, content, name=None, namespace=None):
+ self.content = content
+ # Whether expression results should be coerced to unicode
+ # (see _repr).
+ self._unicode = isinstance(content, unicode)
+ self.name = name
+ # Parse eagerly so syntax errors surface at construction time.
+ self._parsed = parse(content, name=name)
+ if namespace is None:
+ namespace = {}
+ self.namespace = namespace
+
+ def from_filename(cls, filename, namespace=None, encoding=None):
+ # Alternate constructor: load the template from a file; when an
+ # encoding is given the content is decoded to unicode first.
+ f = open(filename, 'rb')
+ c = f.read()
+ f.close()
+ if encoding:
+ c = c.decode(encoding)
+ return cls(content=c, name=filename, namespace=namespace)
+
+ from_filename = classmethod(from_filename)
+
+ def __repr__(self):
+ # e.g. <Template 7f2c... name='tmpl.html'>
+ return '<%s %s name=%r>' % (
+ self.__class__.__name__,
+ hex(id(self))[2:], self.name)
+
+ def substitute(self, *args, **kw):
+ if args:
+ if kw:
+ raise TypeError(
+ "You can only give positional *or* keyword arguments")
+ if len(args) > 1:
+ raise TypeError(
+ "You can only give on positional argument")
+ kw = args[0]
+ ns = self.default_namespace.copy()
+ ns.update(self.namespace)
+ ns.update(kw)
+ result = self._interpret(ns)
+ return result
+
+ def _interpret(self, ns):
+ # Hidden from Paste's exception-report tracebacks.
+ __traceback_hide__ = True
+ parts = []
+ self._interpret_codes(self._parsed, ns, out=parts)
+ return ''.join(parts)
+
+ def _interpret_codes(self, codes, ns, out):
+ __traceback_hide__ = True
+ # Plain strings are literal template text; tuples are directives.
+ for item in codes:
+ if isinstance(item, basestring):
+ out.append(item)
+ else:
+ self._interpret_code(item, ns, out)
+
+ def _interpret_code(self, code, ns, out):
+ # Dispatch one parsed directive tuple: (name, pos, *payload).
+ __traceback_hide__ = True
+ name, pos = code[0], code[1]
+ if name == 'py':
+ self._exec(code[2], ns, pos)
+ elif name == 'continue':
+ raise _TemplateContinue()
+ elif name == 'break':
+ raise _TemplateBreak()
+ elif name == 'for':
+ vars, expr, content = code[2], code[3], code[4]
+ expr = self._eval(expr, ns, pos)
+ self._interpret_for(vars, expr, content, ns, out)
+ elif name == 'cond':
+ parts = code[2:]
+ self._interpret_if(parts, ns, out)
+ elif name == 'expr':
+ # {{expr | f1 | f2}}: evaluate, then apply filters in order.
+ parts = code[2].split('|')
+ base = self._eval(parts[0], ns, pos)
+ for part in parts[1:]:
+ func = self._eval(part, ns, pos)
+ base = func(base)
+ out.append(self._repr(base, pos))
+ elif name == 'default':
+ # {{default var = expr}}: assign only when not already bound.
+ var, expr = code[2], code[3]
+ if var not in ns:
+ result = self._eval(expr, ns, pos)
+ ns[var] = result
+ elif name == 'comment':
+ return
+ else:
+ assert 0, "Unknown code: %r" % name
+
+ def _interpret_for(self, vars, expr, content, ns, out):
+ __traceback_hide__ = True
+ for item in expr:
+ # Bind the loop variable(s); multiple names unpack the item.
+ if len(vars) == 1:
+ ns[vars[0]] = item
+ else:
+ if len(vars) != len(item):
+ raise ValueError(
+ 'Need %i items to unpack (got %i items)'
+ % (len(vars), len(item)))
+ for name, value in zip(vars, item):
+ ns[name] = value
+ try:
+ self._interpret_codes(content, ns, out)
+ except _TemplateContinue:
+ # Raised by a {{continue}} directive in the loop body.
+ continue
+ except _TemplateBreak:
+ # Raised by a {{break}} directive in the loop body.
+ break
+
+ def _interpret_if(self, parts, ns, out):
+ __traceback_hide__ = True
+ # @@: if/else/else gets through
+ # Evaluate the if/elif/else clauses in order and interpret the
+ # body of the first one whose condition is truthy.
+ for part in parts:
+ assert not isinstance(part, basestring)
+ name, pos = part[0], part[1]
+ if name == 'else':
+ result = True
+ else:
+ result = self._eval(part[2], ns, pos)
+ if result:
+ self._interpret_codes(part[3], ns, out)
+ break
+
+ def _eval(self, code, ns, pos):
+ __traceback_hide__ = True
+ try:
+ # NOTE: template expressions are evaluated with eval(); do not
+ # feed untrusted templates to this class.
+ value = eval(code, ns)
+ return value
+ except:
+ # Re-raise the original exception with the template line and
+ # column appended to its message.
+ exc_info = sys.exc_info()
+ e = exc_info[1]
+ if getattr(e, 'args'):
+ arg0 = e.args[0]
+ else:
+ arg0 = str(e)
+ e.args = (self._add_line_info(arg0, pos),)
+ raise exc_info[0], e, exc_info[2]
+
+ def _exec(self, code, ns, pos):
+ __traceback_hide__ = True
+ try:
+ # Runs {{py:...}} blocks; mutates ns in place.
+ exec code in ns
+ except:
+ # Annotate the error with the template position, as in _eval.
+ exc_info = sys.exc_info()
+ e = exc_info[1]
+ e.args = (self._add_line_info(e.args[0], pos),)
+ raise exc_info[0], e, exc_info[2]
+
+ def _repr(self, value, pos):
+ __traceback_hide__ = True
+ try:
+ if value is None:
+ return ''
+ if self._unicode:
+ try:
+ value = unicode(value)
+ except UnicodeDecodeError:
+ value = str(value)
+ else:
+ value = str(value)
+ except:
+ exc_info = sys.exc_info()
+ e = exc_info[1]
+ e.args = (self._add_line_info(e.args[0], pos),)
+ raise exc_info[0], e, exc_info[2]
+ else:
+ if self._unicode and isinstance(value, str):
+ if not self.decode_encoding:
+ raise UnicodeDecodeError(
+ 'Cannot decode str value %r into unicode '
+ '(no default_encoding provided)' % value)
+ value = value.decode(self.default_encoding)
+ elif not self._unicode and isinstance(value, unicode):
+ if not self.decode_encoding:
+ raise UnicodeEncodeError(
+ 'Cannot encode unicode value %r into str '
+ '(no default_encoding provided)' % value)
+ value = value.encode(self.default_encoding)
+ return value
+
+
+ def _add_line_info(self, msg, pos):
+ # Append "at line L column C [in file NAME]" to an error message.
+ msg = "%s at line %s column %s" % (
+ msg, pos[0], pos[1])
+ if self.name:
+ msg += " in file %s" % self.name
+ return msg
+
+def sub(content, **kw):
+ name = kw.get('__name')
+ tmpl = Template(content, name=name)
+ return tmpl.substitute(kw)
+ return result
+
+def paste_script_template_renderer(content, vars, filename=None):
+ # Renderer entry point with the signature Paste Script expects.
+ tmpl = Template(content, name=filename)
+ return tmpl.substitute(vars)
+
+class bunch(dict):
+ # Dict whose items are also readable/writable as attributes; if a
+ # 'default' key is present it is returned for any missing key.
+
+ def __init__(self, **kw):
+ for name, value in kw.items():
+ setattr(self, name, value)
+
+ def __setattr__(self, name, value):
+ # Attribute assignment stores into the dict itself.
+ self[name] = value
+
+ def __getattr__(self, name):
+ try:
+ return self[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __getitem__(self, key):
+ if 'default' in self:
+ # Fall back to the 'default' entry for missing keys.
+ try:
+ return dict.__getitem__(self, key)
+ except KeyError:
+ return dict.__getitem__(self, 'default')
+ else:
+ return dict.__getitem__(self, key)
+
+ def __repr__(self):
+ items = [
+ (k, v) for k, v in self.items()]
+ items.sort()
+ return '<%s %s>' % (
+ self.__class__.__name__,
+ ' '.join(['%s=%r' % (k, v) for k, v in items]))
+
+############################################################
+## HTML Templating
+############################################################
+
+class html(object):
+ # Marker wrapper: a value that is already HTML and must not be
+ # quoted by HTMLTemplate._repr.
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return self.value
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__, self.value)
+
+def html_quote(value):
+ # None renders as the empty string.
+ if value is None:
+ return ''
+ if not isinstance(value, basestring):
+ if hasattr(value, '__unicode__'):
+ value = unicode(value)
+ else:
+ value = str(value)
+ # quote=1: also escape double quotes (safe in attribute values).
+ value = cgi.escape(value, 1)
+ if isinstance(value, unicode):
+ # Force ASCII output, replacing non-ASCII with &#...; references.
+ value = value.encode('ascii', 'xmlcharrefreplace')
+ return value
+
+def url(v):
+ # Coerce to a UTF-8 byte string, then percent-encode for URLs.
+ if not isinstance(v, basestring):
+ if hasattr(v, '__unicode__'):
+ v = unicode(v)
+ else:
+ v = str(v)
+ if isinstance(v, unicode):
+ v = v.encode('utf8')
+ return urllib.quote(v)
+
+def attr(**kw):
+ # Build an html()-wrapped attribute string; None values are skipped
+ # and a trailing underscore is stripped (e.g. class_ -> class).
+ kw = kw.items()
+ kw.sort()
+ parts = []
+ for name, value in kw:
+ if value is None:
+ continue
+ if name.endswith('_'):
+ name = name[:-1]
+ parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
+ return html(' '.join(parts))
+
+class HTMLTemplate(Template):
+ # Template variant whose expression output is HTML-quoted by
+ # default; html/attr/url helpers are added to the namespace.
+
+ default_namespace = Template.default_namespace.copy()
+ default_namespace.update(dict(
+ html=html,
+ attr=attr,
+ url=url,
+ ))
+
+ def _repr(self, value, pos):
+ plain = Template._repr(self, value, pos)
+ if isinstance(value, html):
+ # Explicitly marked as safe HTML -- emit as-is.
+ return plain
+ else:
+ return html_quote(plain)
+
+def sub_html(content, **kw):
+ name = kw.get('__name')
+ tmpl = HTMLTemplate(content, name=name)
+ return tmpl.substitute(kw)
+ return result
+
+
+############################################################
+## Lexing and Parsing
+############################################################
+
+def lex(s, name=None, trim_whitespace=True):
+ """
+ Lex a string into chunks:
+
+ >>> lex('hey')
+ ['hey']
+ >>> lex('hey {{you}}')
+ ['hey ', ('you', (1, 7))]
+ >>> lex('hey {{')
+ Traceback (most recent call last):
+ ...
+ TemplateError: No }} to finish last expression at line 1 column 7
+ >>> lex('hey }}')
+ Traceback (most recent call last):
+ ...
+ TemplateError: }} outside expression at line 1 column 7
+ >>> lex('hey {{ {{')
+ Traceback (most recent call last):
+ ...
+ TemplateError: {{ inside expression at line 1 column 10
+
+ """
+ in_expr = False
+ chunks = []
+ last = 0
+ last_pos = (1, 1)
+ # Alternate between literal text (plain strings) and expression
+ # bodies ((text, (line, col)) tuples) as {{ / }} tokens are seen.
+ for match in token_re.finditer(s):
+ expr = match.group(0)
+ pos = find_position(s, match.end())
+ if expr == '{{' and in_expr:
+ raise TemplateError('{{ inside expression', position=pos,
+ name=name)
+ elif expr == '}}' and not in_expr:
+ raise TemplateError('}} outside expression', position=pos,
+ name=name)
+ if expr == '{{':
+ part = s[last:match.start()]
+ if part:
+ chunks.append(part)
+ in_expr = True
+ else:
+ chunks.append((s[last:match.start()], last_pos))
+ in_expr = False
+ last = match.end()
+ last_pos = pos
+ if in_expr:
+ raise TemplateError('No }} to finish last expression',
+ name=name, position=last_pos)
+ # Trailing literal text after the final token.
+ part = s[last:]
+ if part:
+ chunks.append(part)
+ if trim_whitespace:
+ chunks = trim_lex(chunks)
+ return chunks
+
+# Directives that get surrounding whitespace trimmed when they sit on a
+# line of their own (see trim_lex).
+statement_re = re.compile(r'^(?:if |elif |else |for |py:)')
+single_statements = ['endif', 'endfor', 'continue', 'break']
+# Whitespace directly after / before a newline around a directive.
+trail_whitespace_re = re.compile(r'\n[\t ]*$')
+lead_whitespace_re = re.compile(r'^[\t ]*\n')
+
def trim_lex(tokens):
    r"""
    Takes a lexed set of tokens, and removes whitespace when there is
    a directive on a line by itself:

    >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
    >>> tokens
    [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
    >>> trim_lex(tokens)
    [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
    """
    # Mutates ``tokens`` in place and also returns it.
    for i in range(len(tokens)):
        current = tokens[i]
        if isinstance(tokens[i], basestring):
            # we don't trim this
            continue
        item = current[0]
        # Only block directives (if/for/py:/end.../continue/break) trigger
        # whitespace trimming; plain expressions are left alone.
        if not statement_re.search(item) and item not in single_statements:
            continue
        if not i:
            prev = ''
        else:
            prev = tokens[i-1]
        if i+1 >= len(tokens):
            next = ''
        else:
            next = tokens[i+1]
        # Both neighbors must be plain text (not directives) to trim.
        if (not isinstance(next, basestring)
            or not isinstance(prev, basestring)):
            continue
        # Trim only when the directive sits alone on its line: the
        # preceding text ends with newline+indent and the following text
        # begins with indent+newline.
        if ((not prev or trail_whitespace_re.search(prev))
            and (not next or lead_whitespace_re.search(next))):
            if prev:
                m = trail_whitespace_re.search(prev)
                # +1 to leave the leading \n on:
                prev = prev[:m.start()+1]
                tokens[i-1] = prev
            if next:
                m = lead_whitespace_re.search(next)
                next = next[m.end():]
                tokens[i+1] = next
    return tokens
+
+
def find_position(string, index):
    """Given a string and index, return the 1-based ``(line, column)``
    of that index, as used in TemplateError messages.
    """
    leading = string[:index].splitlines()
    if not leading:
        # index 0 (or an empty string): previously this raised
        # IndexError on leading[-1]; the start of input is (1, 1).
        return (1, 1)
    return (len(leading), len(leading[-1])+1)
+
def parse(s, name=None):
    r"""
    Parses a string into a kind of AST

        >>> parse('{{x}}')
        [('expr', (1, 3), 'x')]
        >>> parse('foo')
        ['foo']
        >>> parse('{{if x}}test{{endif}}')
        [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
        >>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
        ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
        >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
        [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
        >>> parse('{{py:x=1}}')
        [('py', (1, 3), 'x=1')]
        >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
        [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]

    Some exceptions::

        >>> parse('{{continue}}')
        Traceback (most recent call last):
            ...
        TemplateError: continue outside of for loop at line 1 column 3
        >>> parse('{{if x}}foo')
        Traceback (most recent call last):
            ...
        TemplateError: No {{endif}} at line 1 column 3
        >>> parse('{{else}}')
        Traceback (most recent call last):
            ...
        TemplateError: else outside of an if block at line 1 column 3
        >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
        Traceback (most recent call last):
            ...
        TemplateError: Unexpected endif at line 1 column 25
        >>> parse('{{if}}{{endif}}')
        Traceback (most recent call last):
            ...
        TemplateError: if with no expression at line 1 column 3
        >>> parse('{{for x y}}{{endfor}}')
        Traceback (most recent call last):
            ...
        TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
        >>> parse('{{py:x=1\ny=2}}')
        Traceback (most recent call last):
            ...
        TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
    """
    remaining = lex(s, name=name)
    nodes = []
    # Repeatedly peel one node off the front of the token stream.
    while remaining:
        node, remaining = parse_expr(remaining, name)
        nodes.append(node)
    return nodes
+
def parse_expr(tokens, name, context=()):
    """Parse the first node from ``tokens``; return ``(node, remaining)``.

    ``context`` is a tuple of enclosing directive kinds (e.g.
    ``('for', 'if')``) used to validate directives such as
    ``continue``/``break``.
    """
    if isinstance(tokens[0], basestring):
        # Plain text chunk; passes through unchanged.
        return tokens[0], tokens[1:]
    expr, pos = tokens[0]
    expr = expr.strip()
    if expr.startswith('py:'):
        expr = expr[3:].lstrip(' \t')
        if expr.startswith('\n'):
            expr = expr[1:]
        else:
            if '\n' in expr:
                raise TemplateError(
                    'Multi-line py blocks must start with a newline',
                    position=pos, name=name)
        return ('py', pos, expr), tokens[1:]
    elif expr in ('continue', 'break'):
        if 'for' not in context:
            # Bug fix: the message was hard-coded as 'continue', so a
            # stray {{break}} was misreported.
            raise TemplateError(
                '%s outside of for loop' % expr,
                position=pos, name=name)
        return (expr, pos), tokens[1:]
    elif expr.startswith('if '):
        return parse_cond(tokens, name, context)
    elif (expr.startswith('elif ')
          or expr == 'else'):
        raise TemplateError(
            '%s outside of an if block' % expr.split()[0],
            position=pos, name=name)
    elif expr in ('if', 'elif', 'for'):
        raise TemplateError(
            '%s with no expression' % expr,
            position=pos, name=name)
    elif expr in ('endif', 'endfor'):
        raise TemplateError(
            'Unexpected %s' % expr,
            position=pos, name=name)
    elif expr.startswith('for '):
        return parse_for(tokens, name, context)
    elif expr.startswith('default '):
        return parse_default(tokens, name, context)
    elif expr.startswith('#'):
        # {{# ...}} comments keep their raw (unstripped) text.
        return ('comment', pos, tokens[0][0]), tokens[1:]
    return ('expr', pos, tokens[0][0]), tokens[1:]
+
def parse_cond(tokens, name, context):
    # Collect the if/elif/else clauses until the matching {{endif}}.
    start = tokens[0][1]
    clauses = []
    context = context + ('if',)
    while True:
        if not tokens:
            raise TemplateError(
                'Missing {{endif}}',
                position=start, name=name)
        head = tokens[0]
        if isinstance(head, tuple) and head[0] == 'endif':
            return ('cond', start) + tuple(clauses), tokens[1:]
        clause, tokens = parse_one_cond(tokens, name, context)
        clauses.append(clause)
+
def parse_one_cond(tokens, name, context):
    # Parse a single if/elif/else clause header plus its body; stop at
    # the next clause header or {{endif}} (which is left on the stream).
    (first, pos), tokens = tokens[0], tokens[1:]
    body = []
    if first.endswith(':'):
        first = first[:-1]
    if first.startswith('if '):
        clause = ('if', pos, first[3:].lstrip(), body)
    elif first.startswith('elif '):
        clause = ('elif', pos, first[5:].lstrip(), body)
    elif first == 'else':
        clause = ('else', pos, None, body)
    else:
        assert 0, "Unexpected token %r at %s" % (first, pos)
    while True:
        if not tokens:
            raise TemplateError(
                'No {{endif}}',
                position=pos, name=name)
        head = tokens[0]
        if isinstance(head, tuple) and (
                head[0] == 'endif'
                or head[0].startswith('elif ')
                or head[0] == 'else'):
            return clause, tokens
        node, tokens = parse_expr(tokens, name, context)
        body.append(node)
+
def parse_for(tokens, name, context):
    """Parse a {{for ...}} ... {{endfor}} block into
    ``('for', pos, vars, expr, content)``.
    """
    first, pos = tokens[0]
    tokens = tokens[1:]
    context = ('for',) + context
    content = []
    assert first.startswith('for ')
    if first.endswith(':'):
        first = first[:-1]
    first = first[3:].strip()
    match = in_re.search(first)
    if not match:
        raise TemplateError(
            'Bad for (no "in") in %r' % first,
            position=pos, name=name)
    # Compute the loop-variable section once (it was previously sliced a
    # second time when building the tuple); also avoids shadowing the
    # ``vars`` builtin with the intermediate string.
    vars_part = first[:match.start()]
    if '(' in vars_part:
        raise TemplateError(
            'You cannot have () in the variable section of a for loop (%r)'
            % vars_part, position=pos, name=name)
    vars = tuple([
        v.strip() for v in vars_part.split(',')
        if v.strip()])
    expr = first[match.end():]
    while 1:
        if not tokens:
            raise TemplateError(
                'No {{endfor}}',
                position=pos, name=name)
        if (isinstance(tokens[0], tuple)
            and tokens[0][0] == 'endfor'):
            return ('for', pos, vars, expr, content), tokens[1:]
        next, tokens = parse_expr(tokens, name, context)
        content.append(next)
+
def parse_default(tokens, name, context):
    # {{default var=expression}} -- declare a fallback value for a
    # template variable.
    directive, pos = tokens[0]
    assert directive.startswith('default ')
    rest = directive.split(None, 1)[1]
    halves = rest.split('=', 1)
    if len(halves) == 1:
        raise TemplateError(
            "Expression must be {{default var=value}}; no = found in %r" % rest,
            position=pos, name=name)
    var = halves[0].strip()
    if ',' in var:
        raise TemplateError(
            "{{default x, y = ...}} is not supported",
            position=pos, name=name)
    if not var_re.search(var):
        raise TemplateError(
            "Not a valid variable name for {{default}}: %r"
            % var, position=pos, name=name)
    expr = halves[1].strip()
    return ('default', pos, var, expr), tokens[1:]
+
# Usage text for the fill_command command-line entry point; optparse
# substitutes the program name for %prog.
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value

Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
+
+def fill_command(args=None):
+ import sys, optparse, pkg_resources, os
+ if args is None:
+ args = sys.argv[1:]
+ dist = pkg_resources.get_distribution('Paste')
+ parser = optparse.OptionParser(
+ version=str(dist),
+ usage=_fill_command_usage)
+ parser.add_option(
+ '-o', '--output',
+ dest='output',
+ metavar="FILENAME",
+ help="File to write output to (default stdout)")
+ parser.add_option(
+ '--html',
+ dest='use_html',
+ action='store_true',
+ help="Use HTML style filling (including automatic HTML quoting)")
+ parser.add_option(
+ '--env',
+ dest='use_env',
+ action='store_true',
+ help="Put the environment in as top-level variables")
+ options, args = parser.parse_args(args)
+ if len(args) < 1:
+ print 'You must give a template filename'
+ print dir(parser)
+ assert 0
+ template_name = args[0]
+ args = args[1:]
+ vars = {}
+ if options.use_env:
+ vars.update(os.environ)
+ for value in args:
+ if '=' not in value:
+ print 'Bad argument: %r' % value
+ sys.exit(2)
+ name, value = value.split('=', 1)
+ if name.startswith('py:'):
+ name = name[:3]
+ value = eval(value)
+ vars[name] = value
+ if template_name == '-':
+ template_content = sys.stdin.read()
+ template_name = '<stdin>'
+ else:
+ f = open(template_name, 'rb')
+ template_content = f.read()
+ f.close()
+ if options.use_html:
+ TemplateClass = HTMLTemplate
+ else:
+ TemplateClass = Template
+ template = TemplateClass(template_content, name=template_name)
+ result = template.substitute(vars)
+ if options.output:
+ f = open(options.output, 'wb')
+ f.write(result)
+ f.close()
+ else:
+ sys.stdout.write(result)
+
if __name__ == '__main__':
    # Import through the package path so the module is not executed a
    # second time under the name '__main__'.
    from paste.util.template import fill_command
    fill_command()
+
+
diff --git a/paste/util/threadedprint.py b/paste/util/threadedprint.py
new file mode 100644
index 0000000..ae7dc0d
--- /dev/null
+++ b/paste/util/threadedprint.py
@@ -0,0 +1,250 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+threadedprint.py
+================
+
+:author: Ian Bicking
+:date: 12 Jul 2004
+
+Multi-threaded printing; allows the output produced via print to be
+separated according to the thread.
+
+To use this, you must install the catcher, like::
+
+ threadedprint.install()
+
+The installation optionally takes one of three parameters:
+
+default
+ The default destination for print statements (e.g., ``sys.stdout``).
+factory
+ A function that will produce the stream for a thread, given the
+ thread's name.
+paramwriter
+ Instead of writing to a file-like stream, this function will be
+ called like ``paramwriter(thread_name, text)`` for every write.
+
+The thread name is the value returned by
+``threading.currentThread().getName()``, a string (typically something
+like Thread-N).
+
+You can also submit file-like objects for specific threads, which will
+override any of these parameters. To do this, call ``register(stream,
+[threadName])``. ``threadName`` is optional, and if not provided the
+stream will be registered for the current thread.
+
+If no specific stream is registered for a thread, and no default has
+been provided, then an error will occur when anything is written to
+``sys.stdout`` (or printed).
+
+Note: the stream's ``write`` method will be called in the thread the
+text came from, so you should consider thread safety, especially if
+multiple threads share the same writer.
+
+Note: if you want access to the original standard out, use
+``sys.__stdout__``.
+
+You may also uninstall this, via::
+
+ threadedprint.uninstall()
+
+TODO
+----
+
+* Something with ``sys.stderr``.
+* Some default handlers. Maybe something that hooks into `logging`.
+* Possibly cache the results of ``factory`` calls. This would be a
+ semantic change.
+
+"""
+
+import threading
+import sys
+from paste.util import filemixin
+
class PrintCatcher(filemixin.FileMixin):
    """File-like stdout replacement that routes writes per-thread.

    Exactly one of ``default``, ``factory`` or ``paramwriter`` may be
    given (see the module docstring); streams registered via
    ``register()`` override all of them for their thread.
    """

    def __init__(self, default=None, factory=None, paramwriter=None,
                 leave_stdout=False):
        # len(filter(...)) is Python-2-only (filter is lazy on 3.x);
        # count the non-None options with a portable comprehension.
        assert len([v for v in (default, factory, paramwriter)
                    if v is not None]) <= 1, (
            "You can only provide one of default, factory, or paramwriter")
        if leave_stdout:
            assert not default, (
                "You cannot pass in both default (%r) and "
                "leave_stdout=True" % default)
            default = sys.stdout
        if default:
            self._defaultfunc = self._writedefault
        elif factory:
            self._defaultfunc = self._writefactory
        elif paramwriter:
            self._defaultfunc = self._writeparam
        else:
            self._defaultfunc = self._writeerror
        self._default = default
        self._factory = factory
        self._paramwriter = paramwriter
        self._catchers = {}

    def write(self, v, currentThread=threading.currentThread):
        # currentThread is bound as a default argument to avoid a global
        # lookup on every write.
        name = currentThread().getName()
        catchers = self._catchers
        if name not in catchers:
            self._defaultfunc(name, v)
        else:
            catchers[name].write(v)

    def seek(self, *args):
        # Weird, but Google App Engine is seeking on stdout.
        # Bug fix: propagate the underlying stream's return value.
        name = threading.currentThread().getName()
        catchers = self._catchers
        if name not in catchers:
            return self._default.seek(*args)
        else:
            return catchers[name].seek(*args)

    def read(self, *args):
        # Bug fix: the result was previously discarded, so read()
        # always returned None.
        name = threading.currentThread().getName()
        catchers = self._catchers
        if name not in catchers:
            return self._default.read(*args)
        else:
            return catchers[name].read(*args)

    def _writedefault(self, name, v):
        self._default.write(v)

    def _writefactory(self, name, v):
        self._factory(name).write(v)

    def _writeparam(self, name, v):
        self._paramwriter(name, v)

    def _writeerror(self, name, v):
        assert False, (
            "There is no PrintCatcher output stream for the thread %r"
            % name)

    def register(self, catcher, name=None,
                 currentThread=threading.currentThread):
        """Route output for thread ``name`` (default: current) to ``catcher``."""
        if name is None:
            name = currentThread().getName()
        self._catchers[name] = catcher

    def deregister(self, name=None,
                   currentThread=threading.currentThread):
        """Remove the per-thread stream for ``name`` (default: current)."""
        if name is None:
            name = currentThread().getName()
        assert name in self._catchers, (
            "There is no PrintCatcher catcher for the thread %r" % name)
        del self._catchers[name]
+
# Module-level install state for stdout: the active PrintCatcher and the
# original sys.stdout it replaced (both None when not installed).
_printcatcher = None
_oldstdout = None
+
def install(**kw):
    """Replace sys.stdout with a PrintCatcher (kwargs as for PrintCatcher)."""
    global _printcatcher, _oldstdout, register, deregister
    already_active = _printcatcher and sys.stdout is _printcatcher
    if not already_active:
        _oldstdout = sys.stdout
        _printcatcher = sys.stdout = PrintCatcher(**kw)
        # Expose the instance's register/deregister at module level.
        register = _printcatcher.register
        deregister = _printcatcher.deregister
+
def uninstall():
    """Restore the original sys.stdout and discard the PrintCatcher."""
    global _printcatcher, _oldstdout, register, deregister
    if not _printcatcher:
        return
    sys.stdout = _oldstdout
    _printcatcher = _oldstdout = None
    register = not_installed_error
    deregister = not_installed_error
+
def not_installed_error(*args, **kw):
    # Placeholder bound to register/deregister while nothing is
    # installed; any call is a usage error.
    assert False, (
        "threadedprint has not yet been installed (call "
        "threadedprint.install())")
+
# Until install() runs, register/deregister just raise a helpful error.
register = deregister = not_installed_error
+
class StdinCatcher(filemixin.FileMixin):
    """File-like stdin replacement that routes reads per-thread.

    Mirror image of PrintCatcher: exactly one of ``default``,
    ``factory`` or ``paramwriter`` may be given, and register()ed
    streams override them per thread.
    """

    def __init__(self, default=None, factory=None, paramwriter=None):
        # Portable non-None count (len(filter(...)) is Python-2-only).
        assert len([v for v in (default, factory, paramwriter)
                    if v is not None]) <= 1, (
            "You can only provide one of default, factory, or paramwriter")
        if default:
            self._defaultfunc = self._readdefault
        elif factory:
            self._defaultfunc = self._readfactory
        elif paramwriter:
            self._defaultfunc = self._readparam
        else:
            self._defaultfunc = self._readerror
        self._default = default
        self._factory = factory
        self._paramwriter = paramwriter
        self._catchers = {}

    def read(self, size=None, currentThread=threading.currentThread):
        # currentThread bound as a default arg to avoid a global lookup.
        name = currentThread().getName()
        catchers = self._catchers
        if name not in catchers:
            return self._defaultfunc(name, size)
        else:
            return catchers[name].read(size)

    def _readdefault(self, name, size):
        # Bug fix: the data was previously dropped, so read() returned
        # None in default mode.
        return self._default.read(size)

    def _readfactory(self, name, size):
        return self._factory(name).read(size)

    def _readparam(self, name, size):
        # Bug fix: this called self._paramreader, an attribute that is
        # never assigned (__init__ stores _paramwriter), and dropped the
        # result.
        return self._paramwriter(name, size)

    def _readerror(self, name, size):
        assert False, (
            "There is no StdinCatcher output stream for the thread %r"
            % name)

    def register(self, catcher, name=None,
                 currentThread=threading.currentThread):
        """Route reads for thread ``name`` (default: current) to ``catcher``."""
        if name is None:
            name = currentThread().getName()
        self._catchers[name] = catcher

    def deregister(self, catcher, name=None,
                   currentThread=threading.currentThread):
        # Note: ``catcher`` is accepted but unused (kept for backward
        # compatibility with existing callers).
        if name is None:
            name = currentThread().getName()
        assert name in self._catchers, (
            "There is no StdinCatcher catcher for the thread %r" % name)
        del self._catchers[name]
+
# Install state for the stdin side, parallel to _printcatcher/_oldstdout.
_stdincatcher = None
_oldstdin = None
+
def install_stdin(**kw):
    """Replace sys.stdin with a StdinCatcher (kwargs as for StdinCatcher)."""
    global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
    if _stdincatcher:
        # Already installed; leave the existing catcher in place.
        return
    _oldstdin = sys.stdin
    _stdincatcher = sys.stdin = StdinCatcher(**kw)
    register_stdin = _stdincatcher.register
    deregister_stdin = _stdincatcher.deregister
+
def uninstall_stdin():
    """Restore the original sys.stdin and discard the StdinCatcher.

    Bug fix: this was defined as a second ``uninstall``, silently
    shadowing the stdout version above so ``threadedprint.uninstall()``
    no longer did what the module docstring promises; renamed to match
    ``install_stdin``.  Also fixes the ``_oldstin`` typo in the global
    declaration.
    """
    global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
    if _stdincatcher:
        sys.stdin = _oldstdin
        _stdincatcher = _oldstdin = None
        register_stdin = deregister_stdin = not_installed_error_stdin
+
def not_installed_error_stdin(*args, **kw):
    # Placeholder bound to register_stdin/deregister_stdin while the
    # stdin catcher is not installed.
    assert False, (
        "threadedprint has not yet been installed for stdin (call "
        "threadedprint.install_stdin())")
diff --git a/paste/util/threadinglocal.py b/paste/util/threadinglocal.py
new file mode 100644
index 0000000..06f2643
--- /dev/null
+++ b/paste/util/threadinglocal.py
@@ -0,0 +1,43 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""
+Implementation of thread-local storage, for Python versions that don't
+have thread local storage natively.
+"""
+
try:
    import threading
except ImportError:
    # No threads, so "thread local" means process-global
    class local(object):
        pass
else:
    try:
        # Native implementation (CPython >= 2.4).
        local = threading.local
    except AttributeError:
        # Added in 2.4, but now we'll have to define it ourselves
        import thread
        class local(object):
            # Pure-Python fallback: a dict of per-thread attribute dicts
            # keyed by thread id, stored via __dict__ to bypass the
            # overridden __setattr__/__getattr__.

            def __init__(self):
                self.__dict__['__objs'] = {}

            def __getattr__(self, attr, g=thread.get_ident):
                # g bound as a default argument to avoid a global lookup
                # on every attribute access.
                try:
                    return self.__dict__['__objs'][g()][attr]
                except KeyError:
                    raise AttributeError(
                        "No variable %s defined for the thread %s"
                        % (attr, g()))

            def __setattr__(self, attr, value, g=thread.get_ident):
                self.__dict__['__objs'].setdefault(g(), {})[attr] = value

            def __delattr__(self, attr, g=thread.get_ident):
                try:
                    del self.__dict__['__objs'][g()][attr]
                except KeyError:
                    raise AttributeError(
                        "No variable %s defined for thread %s"
                        % (attr, g()))
+
diff --git a/paste/wsgilib.py b/paste/wsgilib.py
new file mode 100644
index 0000000..67ced97
--- /dev/null
+++ b/paste/wsgilib.py
@@ -0,0 +1,597 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+"""
+A module of many disparate routines.
+"""
+
+# functions which moved to paste.request and paste.response
+# Deprecated around 15 Dec 2005
+from paste.request import get_cookies, parse_querystring, parse_formvars
+from paste.request import construct_url, path_info_split, path_info_pop
+from paste.response import HeaderDict, has_header, header_value, remove_header
+from paste.response import error_body_response, error_response, error_response_app
+
+from traceback import print_exception
+import urllib
+from cStringIO import StringIO
+import sys
+from urlparse import urlsplit
+import warnings
+
# Public API; several of these are re-exports from paste.request /
# paste.response kept for backwards compatibility and wrapped with a
# deprecation warning at the bottom of this module.
__all__ = ['add_close', 'add_start_close', 'capture_output', 'catch_errors',
           'catch_errors_app', 'chained_app_iters', 'construct_url',
           'dump_environ', 'encode_unicode_app_iter', 'error_body_response',
           'error_response', 'get_cookies', 'has_header', 'header_value',
           'interactive', 'intercept_output', 'path_info_pop',
           'path_info_split', 'raw_interactive', 'send_file']
+
class add_close(object):
    """An iterable that wraps ``app_iterable`` and invokes
    ``close_func`` when closed, in addition to the iterable's own
    close() (if any)."""

    def __init__(self, app_iterable, close_func):
        self.app_iterable = app_iterable
        self.app_iter = iter(app_iterable)
        self.close_func = close_func
        self._closed = False

    def __iter__(self):
        return self

    def next(self):
        return self.app_iter.next()

    def close(self):
        self._closed = True
        if hasattr(self.app_iterable, 'close'):
            self.app_iterable.close()
        self.close_func()

    def __del__(self):
        if self._closed:
            return
        # We can't raise an error or anything at this stage
        print >> sys.stderr, (
            "Error: app_iter.close() was not called when finishing "
            "WSGI request. finalization function %s not called"
            % self.close_func)
+
class add_start_close(object):
    """An iterable that wraps ``app_iterable``, calls ``start_func``
    just before the first item is produced, and ``close_func`` (if
    given) when closed."""

    def __init__(self, app_iterable, start_func, close_func=None):
        self.app_iterable = app_iterable
        self.app_iter = iter(app_iterable)
        self.first = True
        self.start_func = start_func
        self.close_func = close_func
        self._closed = False

    def __iter__(self):
        return self

    def next(self):
        if self.first:
            # Fire start_func exactly once, before the first item.
            self.start_func()
            self.first = False
        return self.app_iter.next()

    def close(self):
        self._closed = True
        if hasattr(self.app_iterable, 'close'):
            self.app_iterable.close()
        if self.close_func is not None:
            self.close_func()

    def __del__(self):
        if self._closed:
            return
        # We can't raise an error or anything at this stage
        print >> sys.stderr, (
            "Error: app_iter.close() was not called when finishing "
            "WSGI request. finalization function %s not called"
            % self.close_func)
+
+class chained_app_iters(object):
+
+ """
+ Chains several app_iters together, also delegating .close() to each
+ of them.
+ """
+
+ def __init__(self, *chained):
+ self.app_iters = chained
+ self.chained = [iter(item) for item in chained]
+ self._closed = False
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if len(self.chained) == 1:
+ return self.chained[0].next()
+ else:
+ try:
+ return self.chained[0].next()
+ except StopIteration:
+ self.chained.pop(0)
+ return self.next()
+
+ def close(self):
+ self._closed = True
+ got_exc = None
+ for app_iter in self.app_iters:
+ try:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+ except:
+ got_exc = sys.exc_info()
+ if got_exc:
+ raise got_exc[0], got_exc[1], got_exc[2]
+
+ def __del__(self):
+ if not self._closed:
+ # We can't raise an error or anything at this stage
+ print >> sys.stderr, (
+ "Error: app_iter.close() was not called when finishing "
+ "WSGI request. finalization function %s not called"
+ % self.close_func)
+
class encode_unicode_app_iter(object):
    """Wraps an app_iterable, encoding any unicode chunks it yields
    into byte strings with the given encoding/errors policy."""

    def __init__(self, app_iterable, encoding=sys.getdefaultencoding(),
                 errors='strict'):
        self.app_iterable = app_iterable
        self.app_iter = iter(app_iterable)
        self.encoding = encoding
        self.errors = errors

    def __iter__(self):
        return self

    def next(self):
        chunk = self.app_iter.next()
        if isinstance(chunk, unicode):
            return chunk.encode(self.encoding, self.errors)
        return chunk

    def close(self):
        if hasattr(self.app_iterable, 'close'):
            self.app_iterable.close()
+
def catch_errors(application, environ, start_response, error_callback,
                 ok_callback=None):
    """
    Runs the application, and returns the application iterator (which should be
    passed upstream). If an error occurs then error_callback will be called with
    exc_info as its sole argument. If no errors occur and ok_callback is given,
    then it will be called with no arguments.
    """
    try:
        app_iter = application(environ, start_response)
    except:
        error_callback(sys.exc_info())
        raise
    # isinstance instead of type(...) in (list, tuple): idiomatic, and
    # subclasses of list/tuple won't raise during iteration either.
    if isinstance(app_iter, (list, tuple)):
        # These won't produce exceptions
        if ok_callback:
            ok_callback()
        return app_iter
    else:
        return _wrap_app_iter(app_iter, error_callback, ok_callback)
+
+class _wrap_app_iter(object):
+
+ def __init__(self, app_iterable, error_callback, ok_callback):
+ self.app_iterable = app_iterable
+ self.app_iter = iter(app_iterable)
+ self.error_callback = error_callback
+ self.ok_callback = ok_callback
+ if hasattr(self.app_iterable, 'close'):
+ self.close = self.app_iterable.close
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ try:
+ return self.app_iter.next()
+ except StopIteration:
+ if self.ok_callback:
+ self.ok_callback()
+ raise
+ except:
+ self.error_callback(sys.exc_info())
+ raise
+
def catch_errors_app(application, environ, start_response, error_callback_app,
                     ok_callback=None, catch=Exception):
    """
    Like ``catch_errors``, except error_callback_app should be a
    callable that will receive *three* arguments -- ``environ``,
    ``start_response``, and ``exc_info``.  It should call
    ``start_response`` (*with* the exc_info argument!) and return an
    iterator.
    """
    try:
        app_iter = application(environ, start_response)
    except catch:
        return error_callback_app(environ, start_response, sys.exc_info())
    # isinstance instead of type(...) in (list, tuple): idiomatic, and
    # subclasses of list/tuple won't raise during iteration either.
    if isinstance(app_iter, (list, tuple)):
        # These won't produce exceptions
        if ok_callback is not None:
            ok_callback()
        return app_iter
    else:
        return _wrap_app_iter_app(
            environ, start_response, app_iter,
            error_callback_app, ok_callback, catch=catch)
+
class _wrap_app_iter_app(object):
    # Iterator wrapper used by catch_errors_app: on a caught exception it
    # closes the original iterable, asks error_callback_app for a
    # replacement response, and transparently switches iteration to it.

    def __init__(self, environ, start_response, app_iterable,
                 error_callback_app, ok_callback, catch=Exception):
        self.environ = environ
        self.start_response = start_response
        self.app_iterable = app_iterable
        self.app_iter = iter(app_iterable)
        self.error_callback_app = error_callback_app
        self.ok_callback = ok_callback
        self.catch = catch
        # Delegate close() directly when the wrapped iterable has one.
        if hasattr(self.app_iterable, 'close'):
            self.close = self.app_iterable.close

    def __iter__(self):
        return self

    def next(self):
        try:
            return self.app_iter.next()
        except StopIteration:
            # Normal completion: notify, then let StopIteration escape.
            if self.ok_callback:
                self.ok_callback()
            raise
        except self.catch:
            if hasattr(self.app_iterable, 'close'):
                try:
                    self.app_iterable.close()
                except:
                    # @@: Print to wsgi.errors?
                    pass
            # Ask the error app for a replacement response and rebind
            # this instance's next/close to it, so all further iteration
            # comes from the error response.
            new_app_iterable = self.error_callback_app(
                self.environ, self.start_response, sys.exc_info())
            app_iter = iter(new_app_iterable)
            if hasattr(new_app_iterable, 'close'):
                self.close = new_app_iterable.close
            self.next = app_iter.next
            return self.next()
+
def raw_interactive(application, path='', raise_on_wsgi_error=False,
                    **environ):
    """
    Runs the application in a fake environment.

    ``path`` may include a query string.  Extra keyword arguments are
    merged into the environ with ``__`` translated to ``.`` (e.g.
    ``wsgi__input``).  Returns ``(status, headers, body, errors)``.
    """
    assert "path_info" not in environ, "argument list changed"
    if raise_on_wsgi_error:
        # Any write to wsgi.errors raises immediately (see ErrorRaiser).
        errors = ErrorRaiser()
    else:
        errors = StringIO()
    basic_environ = {
        # mandatory CGI variables
        'REQUEST_METHOD': 'GET', # always mandatory
        'SCRIPT_NAME': '', # may be empty if app is at the root
        'PATH_INFO': '', # may be empty if at root of app
        'SERVER_NAME': 'localhost', # always mandatory
        'SERVER_PORT': '80', # always mandatory
        'SERVER_PROTOCOL': 'HTTP/1.0',
        # mandatory wsgi variables
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': 'http',
        'wsgi.input': StringIO(''),
        'wsgi.errors': errors,
        'wsgi.multithread': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
        }
    if path:
        (_, _, path_info, query, fragment) = urlsplit(str(path))
        path_info = urllib.unquote(path_info)
        # urlsplit returns unicode so coerce it back to str
        path_info, query = str(path_info), str(query)
        basic_environ['PATH_INFO'] = path_info
        if query:
            basic_environ['QUERY_STRING'] = query
    for name, value in environ.items():
        # Keyword arguments use __ where the environ key has a dot.
        name = name.replace('__', '.')
        basic_environ[name] = value
    if ('SERVER_NAME' in basic_environ
        and 'HTTP_HOST' not in basic_environ):
        basic_environ['HTTP_HOST'] = basic_environ['SERVER_NAME']
    istream = basic_environ['wsgi.input']
    if isinstance(istream, str):
        # Allow passing the request body as a plain string.
        basic_environ['wsgi.input'] = StringIO(istream)
        basic_environ['CONTENT_LENGTH'] = len(istream)
    data = {}
    output = []
    headers_set = []
    headers_sent = []
    def start_response(status, headers, exc_info=None):
        # Minimal PEP 333 start_response implementation.
        if exc_info:
            try:
                if headers_sent:
                    # Re-raise original exception only if headers sent
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                # avoid dangling circular reference
                exc_info = None
        elif headers_set:
            # You cannot set the headers more than once, unless the
            # exc_info is provided.
            raise AssertionError("Headers already set and no exc_info!")
        headers_set.append(True)
        data['status'] = status
        data['headers'] = headers
        return output.append
    app_iter = application(basic_environ, start_response)
    try:
        try:
            for s in app_iter:
                if not isinstance(s, str):
                    raise ValueError(
                        "The app_iter response can only contain str (not "
                        "unicode); got: %r" % s)
                headers_sent.append(True)
                if not headers_set:
                    raise AssertionError("Content sent w/o headers!")
                output.append(s)
        except TypeError, e:
            # Typically "iteration over non-sequence", so we want
            # to give better debugging information...
            e.args = ((e.args[0] + ' iterable: %r' % app_iter),) + e.args[1:]
            raise
    finally:
        if hasattr(app_iter, 'close'):
            app_iter.close()
    return (data['status'], data['headers'], ''.join(output),
            errors.getvalue())
+
class ErrorRaiser(object):
    """A wsgi.errors stand-in that raises AssertionError on any write,
    used by raw_interactive(raise_on_wsgi_error=True)."""

    def flush(self):
        pass

    def write(self, value):
        if not value:
            # Empty writes are harmless; ignore them.
            return
        raise AssertionError(
            "No errors should be written (got: %r)" % value)

    def writelines(self, seq):
        raise AssertionError(
            "No errors should be written (got lines: %s)" % list(seq))

    def getvalue(self):
        # Nothing is ever stored, so the captured error text is empty.
        return ''
+
def interactive(*args, **kw):
    """
    Runs the application interactively, wrapping `raw_interactive` but
    returning the output in a formatted way.
    """
    status, headers, content, errors = raw_interactive(*args, **kw)
    out = StringIO()
    if errors:
        out.write('Errors:\n')
        out.write(errors.strip())
        out.write('\n----------end errors\n')
    out.write(status + '\n')
    for name, value in headers:
        out.write('%s: %s\n' % (name, value))
    out.write('\n')
    out.write(content)
    return out.getvalue()
interactive.proxy = 'raw_interactive'
+
def dump_environ(environ, start_response):
    """
    Application which simply dumps the current environment
    variables out as a plain text response.
    """
    output = []
    # sorted() instead of keys()/sort(): identical order, and also works
    # with dict views (Python 3) where list.sort() is unavailable.
    for k in sorted(environ.keys()):
        v = str(environ[k]).replace("\n","\n ")
        output.append("%s: %s\n" % (k, v))
    output.append("\n")
    content_length = environ.get("CONTENT_LENGTH", '')
    if content_length:
        # Echo the request body after the headers.
        output.append(environ['wsgi.input'].read(int(content_length)))
        output.append("\n")
    output = "".join(output)
    headers = [('Content-Type', 'text/plain'),
               ('Content-Length', str(len(output)))]
    start_response("200 OK", headers)
    return [output]
+
def send_file(filename):
    # Deprecated alias; the implementation now lives in paste.fileapp.
    warnings.warn(
        "wsgilib.send_file has been moved to paste.fileapp.FileApp",
        DeprecationWarning, 2)
    from paste import fileapp
    return fileapp.FileApp(filename)
+
def capture_output(environ, start_response, application):
    """
    Deprecated -- use ``intercept_output`` instead.

    Runs ``application`` with ``environ``, forwarding status and headers
    to ``start_response`` (but *not* the body), and returns the captured
    ``[status, headers, body]``.  Typical use:

    .. code-block:: python

        def dehtmlifying_middleware(application):
            def replacement_app(environ, start_response):
                status, headers, body = capture_output(
                    environ, start_response, application)
                content_type = header_value(headers, 'content-type')
                if (not content_type
                    or not content_type.startswith('text/html')):
                    return [body]
                body = re.sub(r'<.*?>', '', body)
                return [body]
            return replacement_app

    """
    warnings.warn(
        'wsgilib.capture_output has been deprecated in favor '
        'of wsgilib.intercept_output',
        DeprecationWarning, 2)
    captured = []
    body = StringIO()
    def recording_start_response(status, headers, exc_info=None):
        # Keep only the most recent status/headers pair.
        del captured[:]
        captured.append(status)
        captured.append(headers)
        start_response(status, headers, exc_info)
        return body.write
    app_iter = application(environ, recording_start_response)
    try:
        for chunk in app_iter:
            body.write(chunk)
    finally:
        if hasattr(app_iter, 'close'):
            app_iter.close()
    # Pad with None if the app never called start_response.
    while len(captured) < 2:
        captured.append(None)
    captured.append(body.getvalue())
    return captured
+
def intercept_output(environ, application, conditional=None,
                     start_response=None):
    """
    Runs application with environ and captures status, headers, and
    body.  None are sent on; you must send them on yourself (unlike
    ``capture_output``)

    Typically this is used like:

    .. code-block:: python

        def dehtmlifying_middleware(application):
            def replacement_app(environ, start_response):
                status, headers, body = intercept_output(
                    environ, application)
                start_response(status, headers)
                content_type = header_value(headers, 'content-type')
                if (not content_type
                    or not content_type.startswith('text/html')):
                    return [body]
                body = re.sub(r'<.*?>', '', body)
                return [body]
            return replacement_app

    A third optional argument ``conditional`` should be a function
    that takes ``conditional(status, headers)`` and returns False if
    the request should not be intercepted.  In that case
    ``start_response`` will be called and ``(None, None, app_iter)``
    will be returned.  You must detect that in your code and return
    the app_iter, like:

    .. code-block:: python

        def dehtmlifying_middleware(application):
            def replacement_app(environ, start_response):
                status, headers, body = intercept_output(
                    environ, application,
                    lambda s, h: header_value(headers, 'content-type').startswith('text/html'),
                    start_response)
                if status is None:
                    return body
                start_response(status, headers)
                body = re.sub(r'<.*?>', '', body)
                return [body]
            return replacement_app
    """
    if conditional is not None and start_response is None:
        raise TypeError(
            "If you provide conditional you must also provide "
            "start_response")
    data = []
    output = StringIO()
    def replacement_start_response(status, headers, exc_info=None):
        # When the conditional declines interception, mark data with a
        # single None and delegate to the real start_response.
        if conditional is not None and not conditional(status, headers):
            data.append(None)
            return start_response(status, headers, exc_info)
        if data:
            data[:] = []
        data.append(status)
        data.append(headers)
        return output.write
    app_iter = application(environ, replacement_start_response)
    # NOTE(review): if the application defers start_response until its
    # iterable is consumed (e.g. a generator), data is still empty here
    # and data[0] raises IndexError -- confirm callers always invoke
    # start_response before returning.
    if data[0] is None:
        return (None, None, app_iter)
    try:
        for item in app_iter:
            output.write(item)
    finally:
        if hasattr(app_iter, 'close'):
            app_iter.close()
    if not data:
        data.append(None)
    if len(data) < 2:
        data.append(None)
    data.append(output.getvalue())
    return data
+
+## Deprecation warning wrapper:
+
class ResponseHeaderDict(HeaderDict):
    """Backwards-compatibility alias: warns and delegates to
    paste.response.HeaderDict."""

    def __init__(self, *args, **kw):
        warnings.warn(
            "The class wsgilib.ResponseHeaderDict has been moved "
            "to paste.response.HeaderDict",
            DeprecationWarning, 2)
        HeaderDict.__init__(self, *args, **kw)
+
def _warn_deprecated(new_func):
    # Build a wrapper that warns about the function's new location
    # before delegating to it; used below for re-exported names.
    new_name = new_func.func_name
    new_path = new_func.func_globals['__name__'] + '.' + new_name
    def replacement(*args, **kw):
        warnings.warn(
            "The function wsgilib.%s has been moved to %s"
            % (new_name, new_path),
            DeprecationWarning, 2)
        return new_func(*args, **kw)
    try:
        # Preserve the original name for introspection/tracebacks.
        replacement.func_name = new_func.func_name
    except:
        pass
    return replacement
+
# Put warnings wrapper in place for all public functions that
# were imported from elsewhere:
# (anything in __all__ whose defining module is not this one is wrapped,
# so using it via wsgilib warns about the new location)

for _name in __all__:
    _func = globals()[_name]
    if (hasattr(_func, 'func_globals')
        and _func.func_globals['__name__'] != __name__):
        globals()[_name] = _warn_deprecated(_func)
+
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
+
diff --git a/paste/wsgiwrappers.py b/paste/wsgiwrappers.py
new file mode 100644
index 0000000..64f20cf
--- /dev/null
+++ b/paste/wsgiwrappers.py
@@ -0,0 +1,582 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+"""WSGI Wrappers for a Request and Response
+
+The WSGIRequest and WSGIResponse objects are light wrappers to make it easier
+to deal with an incoming request and sending a response.
+"""
+import re
+import warnings
+from pprint import pformat
+from Cookie import SimpleCookie
+from paste.request import EnvironHeaders, get_cookie_dict, \
+ parse_dict_querystring, parse_formvars
+from paste.util.multidict import MultiDict, UnicodeMultiDict
+from paste.registry import StackedObjectProxy
+from paste.response import HeaderDict
+from paste.wsgilib import encode_unicode_app_iter
+from paste.httpheaders import ACCEPT_LANGUAGE
+from paste.util.mimeparse import desired_matches
+
__all__ = ['WSGIRequest', 'WSGIResponse']

# Extracts the charset parameter from a Content-Type header value,
# e.g. "text/html; charset=utf-8" -> captures "utf-8" (case-insensitive).
_CHARSET_RE = re.compile(r';\s*charset=([^;]*)', re.I)
+
class DeprecatedSettings(StackedObjectProxy):
    # Transitional shim for the old module-level ``settings`` proxy:
    # pushing a config object onto it warns, then mirrors the object
    # onto WSGIResponse.defaults in addition to itself.
    # NOTE(review): the warning text points users at
    # WSGIRequest.defaults while the code pushes to
    # WSGIResponse.defaults -- confirm which target is intended.
    def _push_object(self, obj):
        warnings.warn('paste.wsgiwrappers.settings is deprecated: Please use '
                      'paste.wsgiwrappers.WSGIRequest.defaults instead',
                      DeprecationWarning, 3)
        WSGIResponse.defaults._push_object(obj)
        StackedObjectProxy._push_object(self, obj)

# settings is deprecated: use WSGIResponse.defaults instead
settings = DeprecatedSettings(default=dict())
+
class environ_getter(object):
    """Descriptor that delegates an attribute to a key in ``self.environ``.

    When the key is missing, returns ``default`` -- or, if
    ``default_factory`` is given, calls it, stores the result in the
    environ under ``key``, and returns it (so the default persists).
    """
    # @@: Also __set__? Should setting be allowed?
    def __init__(self, key, default='', default_factory=None):
        self.key = key
        self.default = default
        self.default_factory = default_factory

    def __get__(self, obj, type=None):
        # Accessed on the class itself (obj is None): return the
        # descriptor, per the usual descriptor protocol.  The original
        # tested ``type is None``, which made class-level access fail
        # with AttributeError on ``None.environ``.
        if obj is None:
            return self
        if self.key not in obj.environ:
            if self.default_factory:
                # Cache the freshly built default back into the environ
                # so repeated reads see the same object.
                val = obj.environ[self.key] = self.default_factory()
                return val
            else:
                return self.default
        return obj.environ[self.key]

    def __repr__(self):
        return '<Proxy for WSGI environ %r key>' % self.key
+
class WSGIRequest(object):
    """WSGI Request API Object

    This object represents a WSGI request with a more friendly interface.
    This does not expose every detail of the WSGI environment, and attempts
    to express nothing beyond what is available in the environment
    dictionary.

    The only state maintained in this object is the desired ``charset``,
    its associated ``errors`` handler, and the ``decode_param_names``
    option.

    The incoming parameter values will be automatically coerced to unicode
    objects of the ``charset`` encoding when ``charset`` is set. The
    incoming parameter names are not decoded to unicode unless the
    ``decode_param_names`` option is enabled.

    When unicode is expected, ``charset`` will be overridden by the
    value of the ``Content-Type`` header's charset parameter if one was
    specified by the client.

    The class variable ``defaults`` specifies default values for
    ``charset``, ``errors``, and ``language``. These can be overridden for the
    current request via the registry.

    The ``language`` default value is considered the fallback during i18n
    translations to ensure in odd cases that mixed languages don't occur should
    the ``language`` file contain the string but not another language in the
    accepted languages list. The ``language`` value only applies when getting
    a list of accepted languages from the HTTP Accept header.

    This behavior is duplicated from Aquarium, and may seem strange but is
    very useful. Normally, everything in the code is in "en-us". However,
    the "en-us" translation catalog is usually empty. If the user requests
    ``["en-us", "zh-cn"]`` and a translation isn't found for a string in
    "en-us", you don't want gettext to fallback to "zh-cn". You want it to
    just use the string itself. Hence, if a string isn't found in the
    ``language`` catalog, the string in the source code will be used.

    *All* other state is kept in the environment dictionary; this is
    essential for interoperability.

    You are free to subclass this object.

    """
    defaults = StackedObjectProxy(default=dict(charset=None, errors='replace',
                                               decode_param_names=False,
                                               language='en-us'))
    def __init__(self, environ):
        self.environ = environ
        # This isn't "state" really, since the object is derivative:
        self.headers = EnvironHeaders(environ)

        defaults = self.defaults._current_obj()
        self.charset = defaults.get('charset')
        if self.charset:
            # There's a charset: params will be coerced to unicode. In that
            # case, attempt to use the charset specified by the browser
            browser_charset = self.determine_browser_charset()
            if browser_charset:
                self.charset = browser_charset
        self.errors = defaults.get('errors', 'strict')
        self.decode_param_names = defaults.get('decode_param_names', False)
        self._languages = None  # lazy cache for the ``languages`` property

    # Simple delegations to well-known WSGI environ keys:
    body = environ_getter('wsgi.input')
    scheme = environ_getter('wsgi.url_scheme')
    method = environ_getter('REQUEST_METHOD')
    script_name = environ_getter('SCRIPT_NAME')
    path_info = environ_getter('PATH_INFO')

    def urlvars(self):
        """
        Return any variables matched in the URL (e.g.,
        ``wsgiorg.routing_args``).
        """
        if 'paste.urlvars' in self.environ:
            return self.environ['paste.urlvars']
        elif 'wsgiorg.routing_args' in self.environ:
            # wsgiorg.routing_args is (positional_args, named_args);
            # only the named part is returned here.
            return self.environ['wsgiorg.routing_args'][1]
        else:
            return {}
    urlvars = property(urlvars, doc=urlvars.__doc__)

    def is_xhr(self):
        """Returns a boolean if X-Requested-With is present and a XMLHttpRequest"""
        return self.environ.get('HTTP_X_REQUESTED_WITH', '') == 'XMLHttpRequest'
    is_xhr = property(is_xhr, doc=is_xhr.__doc__)

    def host(self):
        """Host name provided in HTTP_HOST, with fall-back to SERVER_NAME"""
        return self.environ.get('HTTP_HOST', self.environ.get('SERVER_NAME'))
    host = property(host, doc=host.__doc__)

    def languages(self):
        """Return a list of preferred languages, most preferred first.

        The list may be empty.
        """
        if self._languages is not None:
            return self._languages
        # (The original also read HTTP_ACCEPT_LANGUAGE into an unused
        # local; ACCEPT_LANGUAGE.parse consults the environ directly.)
        langs = ACCEPT_LANGUAGE.parse(self.environ)
        fallback = self.defaults.get('language', 'en-us')
        if not fallback:
            return langs
        if fallback not in langs:
            langs.append(fallback)
        # Truncate everything less preferred than the fallback language.
        index = langs.index(fallback)
        langs[index+1:] = []
        self._languages = langs
        return self._languages
    languages = property(languages, doc=languages.__doc__)

    def _GET(self):
        # Raw (non-unicode) QUERY_STRING parameters.
        return parse_dict_querystring(self.environ)

    def GET(self):
        """
        Dictionary-like object representing the QUERY_STRING
        parameters. Always present, though possibly empty.

        If the same key is present in the query string multiple times, a
        list of its values can be retrieved from the ``MultiDict`` via
        the ``getall`` method.

        Returns a ``MultiDict`` container or a ``UnicodeMultiDict`` when
        ``charset`` is set.
        """
        params = self._GET()
        if self.charset:
            params = UnicodeMultiDict(params, encoding=self.charset,
                                      errors=self.errors,
                                      decode_keys=self.decode_param_names)
        return params
    GET = property(GET, doc=GET.__doc__)

    def _POST(self):
        # Raw POST body variables only; GET variables are deliberately
        # excluded here and merged separately in ``params``.
        return parse_formvars(self.environ, include_get_vars=False)

    def POST(self):
        """Dictionary-like object representing the POST body.

        Most values are encoded strings, or unicode strings when
        ``charset`` is set. There may also be FieldStorage objects
        representing file uploads. If this is not a POST request, or the
        body is not encoded fields (e.g., an XMLRPC request) then this
        will be empty.

        This will consume wsgi.input when first accessed if applicable,
        but the raw version will be put in
        environ['paste.parsed_formvars'].

        Returns a ``MultiDict`` container or a ``UnicodeMultiDict`` when
        ``charset`` is set.
        """
        params = self._POST()
        if self.charset:
            params = UnicodeMultiDict(params, encoding=self.charset,
                                      errors=self.errors,
                                      decode_keys=self.decode_param_names)
        return params
    POST = property(POST, doc=POST.__doc__)

    def params(self):
        """Dictionary-like object of keys from POST, GET, URL dicts

        Return a key value from the parameters, they are checked in the
        following order: POST, GET, URL

        Additional methods supported:

        ``getlist(key)``
            Returns a list of all the values by that key, collected from
            POST, GET, URL dicts

        Returns a ``MultiDict`` container or a ``UnicodeMultiDict`` when
        ``charset`` is set.
        """
        params = MultiDict()
        params.update(self._POST())
        params.update(self._GET())
        if self.charset:
            params = UnicodeMultiDict(params, encoding=self.charset,
                                      errors=self.errors,
                                      decode_keys=self.decode_param_names)
        return params
    params = property(params, doc=params.__doc__)

    def cookies(self):
        """Dictionary of cookies keyed by cookie name.

        Just a plain dictionary, may be empty but not None.

        """
        return get_cookie_dict(self.environ)
    cookies = property(cookies, doc=cookies.__doc__)

    def determine_browser_charset(self):
        """
        Determine the encoding as specified by the browser via the
        Content-Type's charset parameter, if one is set
        """
        charset_match = _CHARSET_RE.search(self.headers.get('Content-Type', ''))
        if charset_match:
            return charset_match.group(1)

    def match_accept(self, mimetypes):
        """Return a list of specified mime-types that the browser's HTTP Accept
        header allows in the order provided."""
        return desired_matches(mimetypes,
                               self.environ.get('HTTP_ACCEPT', '*/*'))

    def __repr__(self):
        """Show important attributes of the WSGIRequest"""
        pf = pformat
        msg = '<%s.%s object at 0x%x method=%s,' % \
            (self.__class__.__module__, self.__class__.__name__,
             id(self), pf(self.method))
        msg += '\nscheme=%s, host=%s, script_name=%s, path_info=%s,' % \
            (pf(self.scheme), pf(self.host), pf(self.script_name),
             pf(self.path_info))
        msg += '\nlanguages=%s,' % pf(self.languages)
        if self.charset:
            msg += ' charset=%s, errors=%s,' % (pf(self.charset),
                                                pf(self.errors))
        msg += '\nGET=%s,' % pf(self.GET)
        msg += '\nPOST=%s,' % pf(self.POST)
        msg += '\ncookies=%s>' % pf(self.cookies)
        return msg
+
class WSGIResponse(object):
    """A basic HTTP response with content, headers, and out-bound cookies

    The class variable ``defaults`` specifies default values for
    ``content_type``, ``charset`` and ``errors``. These can be overridden
    for the current request via the registry.

    """
    defaults = StackedObjectProxy(
        default=dict(content_type='text/html', charset='utf-8',
                     errors='strict', headers={'Cache-Control':'no-cache'})
        )
    def __init__(self, content='', mimetype=None, code=200):
        # _iter holds the content chunks; _is_str_iter is True when the
        # content is a concrete list (appendable, re-iterable) rather
        # than a one-shot iterator/generator.
        self._iter = None
        self._is_str_iter = True

        self.content = content
        self.headers = HeaderDict()
        self.cookies = SimpleCookie()
        self.status_code = code

        defaults = self.defaults._current_obj()
        if not mimetype:
            mimetype = defaults.get('content_type', 'text/html')
            charset = defaults.get('charset')
            if charset:
                # The default charset is only attached when the mimetype
                # itself was defaulted.
                mimetype = '%s; charset=%s' % (mimetype, charset)
        self.headers.update(defaults.get('headers', {}))
        self.headers['Content-Type'] = mimetype
        self.errors = defaults.get('errors', 'strict')

    def __str__(self):
        """Returns a rendition of the full HTTP message, including headers.

        When the content is an iterator, the actual content is replaced with the
        output of str(iterator) (to avoid exhausting the iterator).
        """
        if self._is_str_iter:
            content = ''.join(self.get_content())
        else:
            content = str(self.content)
        return '\n'.join(['%s: %s' % (key, value)
                          for key, value in self.headers.headeritems()]) \
               + '\n\n' + content

    def __call__(self, environ, start_response):
        """Convenience call to return output and set status information

        Conforms to the WSGI interface for calling purposes only.

        Example usage:

        .. code-block:: python

            def wsgi_app(environ, start_response):
                response = WSGIResponse()
                response.write("Hello world")
                response.headers['Content-Type'] = 'latin1'
                return response(environ, start_response)

        """
        # Unrecognized numeric status codes raise KeyError here.
        status_text = STATUS_CODE_TEXT[self.status_code]
        status = '%s %s' % (self.status_code, status_text)
        response_headers = self.headers.headeritems()
        for c in self.cookies.values():
            # output(header='') renders the cookie without the leading
            # "Set-Cookie:" label.
            response_headers.append(('Set-Cookie', c.output(header='')))
        start_response(status, response_headers)
        is_file = isinstance(self.content, file)  # py2 builtin file type
        if 'wsgi.file_wrapper' in environ and is_file:
            return environ['wsgi.file_wrapper'](self.content)
        elif is_file:
            # Two-argument iter(): call read() until it returns '' (EOF).
            return iter(lambda: self.content.read(), '')
        return self.get_content()

    def determine_charset(self):
        """
        Determine the encoding as specified by the Content-Type's charset
        parameter, if one is set
        """
        charset_match = _CHARSET_RE.search(self.headers.get('Content-Type', ''))
        if charset_match:
            return charset_match.group(1)

    def has_header(self, header):
        """
        Case-insensitive check for a header
        """
        warnings.warn('WSGIResponse.has_header is deprecated, use '
                      'WSGIResponse.headers.has_key instead', DeprecationWarning,
                      2)
        return self.headers.has_key(header)

    def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
                   domain=None, secure=None, httponly=None):
        """
        Define a cookie to be sent via the outgoing HTTP headers
        """
        self.cookies[key] = value
        for var_name, var_value in [
            ('max_age', max_age), ('path', path), ('domain', domain),
            ('secure', secure), ('expires', expires), ('httponly', httponly)]:
            # None and False mean "omit this cookie attribute".
            if var_value is not None and var_value is not False:
                # Cookie attribute names use dashes (e.g. max-age).
                self.cookies[key][var_name.replace('_', '-')] = var_value

    def delete_cookie(self, key, path='/', domain=None):
        """
        Notify the browser the specified cookie has expired and should be
        deleted (via the outgoing HTTP headers)
        """
        self.cookies[key] = ''
        if path is not None:
            self.cookies[key]['path'] = path
        if domain is not None:
            self.cookies[key]['domain'] = domain
        # Both expiry mechanisms are set for broad browser compatibility.
        self.cookies[key]['expires'] = 0
        self.cookies[key]['max-age'] = 0

    def _set_content(self, content):
        # Accepts a string, a list of strings, or an arbitrary iterable;
        # a plain (non-iterable) string is normalized to a one-element list.
        if hasattr(content, '__iter__'):
            self._iter = content
            if isinstance(content, list):
                self._is_str_iter = True
            else:
                self._is_str_iter = False
        else:
            self._iter = [content]
            self._is_str_iter = True
    content = property(lambda self: self._iter, _set_content,
                       doc='Get/set the specified content, where content can '
                       'be: a string, a list of strings, a generator function '
                       'that yields strings, or an iterable object that '
                       'produces strings.')

    def get_content(self):
        """
        Returns the content as an iterable of strings, encoding each element of
        the iterator from a Unicode object if necessary.
        """
        charset = self.determine_charset()
        if charset:
            return encode_unicode_app_iter(self.content, charset, self.errors)
        else:
            return self.content

    def wsgi_response(self):
        """
        Return this WSGIResponse as a tuple of WSGI formatted data, including:
        (status, headers, iterable)
        """
        status_text = STATUS_CODE_TEXT[self.status_code]
        status = '%s %s' % (self.status_code, status_text)
        response_headers = self.headers.headeritems()
        for c in self.cookies.values():
            response_headers.append(('Set-Cookie', c.output(header='')))
        return status, response_headers, self.get_content()

    # The remaining methods partially implement the file-like object interface.
    # See http://docs.python.org/lib/bltin-file-objects.html
    def write(self, content):
        # Only list-backed content can be appended to; iterator content
        # cannot be extended without exhausting it.
        if not self._is_str_iter:
            raise IOError, "This %s instance's content is not writable: (content " \
                'is an iterator)' % self.__class__.__name__
        self.content.append(content)

    def flush(self):
        # No buffering is done here, so there is nothing to flush.
        pass

    def tell(self):
        if not self._is_str_iter:
            raise IOError, 'This %s instance cannot tell its position: (content ' \
                'is an iterator)' % self.__class__.__name__
        return sum([len(chunk) for chunk in self._iter])

    ########################################
    ## Content-type and charset

    def charset__get(self):
        """
        Get/set the charset (in the Content-Type)
        """
        header = self.headers.get('content-type')
        if not header:
            return None
        match = _CHARSET_RE.search(header)
        if match:
            return match.group(1)
        return None

    def charset__set(self, charset):
        if charset is None:
            # Setting to None is equivalent to deleting the charset.
            del self.charset
            return
        try:
            header = self.headers.pop('content-type')
        except KeyError:
            raise AttributeError(
                "You cannot set the charset when no content-type is defined")
        match = _CHARSET_RE.search(header)
        if match:
            # Strip any existing charset parameter before appending.
            header = header[:match.start()] + header[match.end():]
        header += '; charset=%s' % charset
        self.headers['content-type'] = header

    def charset__del(self):
        try:
            header = self.headers.pop('content-type')
        except KeyError:
            # Don't need to remove anything
            return
        match = _CHARSET_RE.search(header)
        if match:
            header = header[:match.start()] + header[match.end():]
        self.headers['content-type'] = header

    charset = property(charset__get, charset__set, charset__del, doc=charset__get.__doc__)

    def content_type__get(self):
        """
        Get/set the Content-Type header (or None), *without* the
        charset or any parameters.

        If you include parameters (or ``;`` at all) when setting the
        content_type, any existing parameters will be deleted;
        otherwise they will be preserved.
        """
        header = self.headers.get('content-type')
        if not header:
            return None
        return header.split(';', 1)[0]

    def content_type__set(self, value):
        if ';' not in value:
            # Preserve any parameters already on the existing header.
            header = self.headers.get('content-type', '')
            if ';' in header:
                params = header.split(';', 1)[1]
                value += ';' + params
        self.headers['content-type'] = value

    def content_type__del(self):
        try:
            del self.headers['content-type']
        except KeyError:
            pass

    content_type = property(content_type__get, content_type__set,
                            content_type__del, doc=content_type__get.__doc__)
+
## @@ I'd love to remove this, but paste.httpexceptions.get_exception
## doesn't seem to work...
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
# Maps an integer HTTP status code to its upper-cased reason phrase;
# used by WSGIResponse.__call__ and wsgi_response to build the WSGI
# status line.  Unknown codes raise KeyError at lookup time.
STATUS_CODE_TEXT = {
    100: 'CONTINUE',
    101: 'SWITCHING PROTOCOLS',
    200: 'OK',
    201: 'CREATED',
    202: 'ACCEPTED',
    203: 'NON-AUTHORITATIVE INFORMATION',
    204: 'NO CONTENT',
    205: 'RESET CONTENT',
    206: 'PARTIAL CONTENT',
    226: 'IM USED',
    300: 'MULTIPLE CHOICES',
    301: 'MOVED PERMANENTLY',
    302: 'FOUND',
    303: 'SEE OTHER',
    304: 'NOT MODIFIED',
    305: 'USE PROXY',
    306: 'RESERVED',
    307: 'TEMPORARY REDIRECT',
    400: 'BAD REQUEST',
    401: 'UNAUTHORIZED',
    402: 'PAYMENT REQUIRED',
    403: 'FORBIDDEN',
    404: 'NOT FOUND',
    405: 'METHOD NOT ALLOWED',
    406: 'NOT ACCEPTABLE',
    407: 'PROXY AUTHENTICATION REQUIRED',
    408: 'REQUEST TIMEOUT',
    409: 'CONFLICT',
    410: 'GONE',
    411: 'LENGTH REQUIRED',
    412: 'PRECONDITION FAILED',
    413: 'REQUEST ENTITY TOO LARGE',
    414: 'REQUEST-URI TOO LONG',
    415: 'UNSUPPORTED MEDIA TYPE',
    416: 'REQUESTED RANGE NOT SATISFIABLE',
    417: 'EXPECTATION FAILED',
    429: 'TOO MANY REQUESTS',  # draft-nottingham-http-new-status
    500: 'INTERNAL SERVER ERROR',
    501: 'NOT IMPLEMENTED',
    502: 'BAD GATEWAY',
    503: 'SERVICE UNAVAILABLE',
    504: 'GATEWAY TIMEOUT',
    505: 'HTTP VERSION NOT SUPPORTED',
}