summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGraham.Dumpleton <devnull@localhost>2007-06-23 03:48:29 +0000
committerGraham.Dumpleton <devnull@localhost>2007-06-23 03:48:29 +0000
commite285316aca8f732c6f122c8df0934b847b837ca1 (patch)
treeb917eddd32cd8f4b009e4173bad029c15a9efafa
downloadmod_wsgi-e285316aca8f732c6f122c8df0934b847b837ca1.tar.gz
Move mod_wsgi source code into a mod_wsgi subdirectory of trunk to allow
companion packages to be offered out of same repository at a later date.
-rw-r--r--LICENCE202
-rw-r--r--Makefile-1.X.in40
-rw-r--r--Makefile-2.X.in41
-rw-r--r--README310
-rwxr-xr-xconfigure2304
-rw-r--r--configure.ac100
-rw-r--r--mod_wsgi.c6893
7 files changed, 9890 insertions, 0 deletions
diff --git a/LICENCE b/LICENCE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENCE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile-1.X.in b/Makefile-1.X.in
new file mode 100644
index 0000000..2fc01ac
--- /dev/null
+++ b/Makefile-1.X.in
@@ -0,0 +1,40 @@
+# Copyright 2007 GRAHAM DUMPLETON
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+APXS=@APXS@
+PYTHON=@PYTHON@
+
+CPPFLAGS = @CPPFLAGS@
+CFLAGS =
+LDFLAGS = @LDFLAGS@
+LDLIBS = @LDLIBS@
+
+all : mod_wsgi.so
+
+mod_wsgi.so : mod_wsgi.c
+ $(APXS) -c $(CPPFLAGS) $(CFLAGS) mod_wsgi.c $(LDFLAGS) $(LDLIBS)
+
+install : all
+ $(APXS) -i -n 'mod_wsgi' mod_wsgi.so
+
+clean :
+ -rm -f mod_wsgi.o mod_wsgi.so
+ -rm -f config.log config.status
+ -rm -rf autom4te.cache
+
+distclean : clean
+ -rm -f Makefile Makefile.in
+
+realclean : distclean
+ -rm -f configure
diff --git a/Makefile-2.X.in b/Makefile-2.X.in
new file mode 100644
index 0000000..34f014c
--- /dev/null
+++ b/Makefile-2.X.in
@@ -0,0 +1,41 @@
+# Copyright 2007 GRAHAM DUMPLETON
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+APXS=@APXS@
+PYTHON=@PYTHON@
+
+CPPFLAGS = @CPPFLAGS@
+CFLAGS =
+LDFLAGS = @LDFLAGS@
+LDLIBS = @LDLIBS@
+
+all : mod_wsgi.la
+
+mod_wsgi.la : mod_wsgi.c
+ $(APXS) -c $(CPPFLAGS) $(CFLAGS) mod_wsgi.c $(LDFLAGS) $(LDLIBS)
+
+install : all
+ $(APXS) -i -n 'mod_wsgi' mod_wsgi.la
+
+clean :
+ -rm -rf .libs
+ -rm -f mod_wsgi.o mod_wsgi.la mod_wsgi.lo mod_wsgi.slo mod_wsgi.loT
+ -rm -f config.log config.status
+ -rm -rf autom4te.cache
+
+distclean : clean
+ -rm -f Makefile Makefile.in
+
+realclean : distclean
+ -rm -f configure
diff --git a/README b/README
new file mode 100644
index 0000000..3f1aada
--- /dev/null
+++ b/README
@@ -0,0 +1,310 @@
+===================
+Welcome to MOD_WSGI
+===================
+
+Copyright 2007 GRAHAM DUMPLETON
+
+The mod_wsgi adapter is an Apache module that provides a WSGI compliant
+interface for hosting Python based web applications within Apache. The
+adapter is written completely in C code against the Apache C runtime and
+for hosting WSGI applications within Apache has a lower overhead than using
+existing WSGI adapters for mod_python or CGI.
+
+The package can be compiled for and used with either Apache 1.3, 2.0 or
+2.2. On UNIX systems, either the single threaded 'prefork' or multithreaded
+'worker' Apache MPMs can be used. Development and testing of mod_wsgi has
+been carried out with Python 2.3.5, but is expected to be able to work with
+any version of Python 2.3, or later versions of Python.
+
+Whatever version of Python is used, it must have been compiled with support
+for multithreading. To avoid a measure of memory bloat with your Apache
+processes, Python should also have been compiled with shared library
+support enabled. The majority of Python binary packages for Linux systems
+are not compiled with shared library support enabled. You should therefore
+consider recompiling Python from source code with shared library support
+enabled.
+
+If using a Python binary package for a Linux system, also ensure that
+you have the corresponding 'dev' package installed for the Python package
+you have installed. Without this package you will be missing the Python
+header files and configuration files needed to build mod_wsgi. You should
+also ensure you have the corresponding 'dev' package for the Apache web
+server package you are using.
+
+At this time only build scripts for UNIX systems are supplied. The code
+should still work on Windows, but still waiting for someone to volunteer
+to help develop and test some build scripts for Windows and ensure that
+mod_wsgi works on that platform.
+
+The source code in this package is made available under the terms of the
+Apache Licence, Version 2.0. See the "LICENCE" file for more information.
+
+Installation (UNIX)
+===================
+
+To setup the package ready for building run the "configure" script.
+
+ ./configure
+
+The configure script will attempt to identify the Apache installation to
+use by searching for either the "apxs2" or "apxs" tools included with your
+Apache installation. Similarly, which Python installation to use will be
+determined by looking for the "python" executable.
+
+If these programs are not in a standard location, they cannot be found in
+your PATH, or you wish to use alternate versions to those found, the
+"--with-apxs" and "--with-python" options can be used in conjunction with the
+"configure" script.
+
+ ./configure --with-apxs=/usr/local/apache/bin/apxs \
+ --with-python=/usr/local/bin/python
+
+Note that any of the major Apache versions should be able to be used, ie.,
+all of Apache 1.3, 2.0 and 2.2 should be compatible with this package. You
+will however need to compile the package separately against each version
+and use the resultant Apache module only with the version it was compiled
+for. Which ever version of Apache is used however, it must support dynamic
+loading of Apache modules.
+
+If you have multiple versions of Python installed and you are not using
+that which is the default, you may have to organise that the PATH inherited
+by the Apache application when run will result in Apache finding the
+alternate version. Alternatively, the WSGIPythonExecutable directive should
+be used to specify the exact location of the 'python' executable
+corresponding to the version of Python compiled against. If this is not
+done, the version of Python running within Apache may attempt to use the
+Python modules from the wrong version of Python.
+
+Also note that the Apache module will be bound to the specific major/minor
+version of Python being used. If you ever upgrade to a newer version of
+Python, you will need to rebuild the mod_wsgi module.
+
+Once the package has been configured, it can be built by running:
+
+ make
+
+The only product of the build process that needs to be installed is the
+Apache module itself. There are no separate Python code files as everything
+is done within C code compiled into the Apache module.
+
+To install the Apache module into the standard location for Apache modules
+as dictated by Apache for your installation, run:
+
+ make install
+
+Installation should be done as the root user if appropriate.
+
+If you want to install the Apache module in a non standard location
+dictated by how your operating system distribution structures the
+configuration files and modules for Apache, you will need to copy the file
+manually into place.
+
+If you are using Apache 1.3 the compiled Apache module can be found in the
+same directory as this "README" file and is called "mod_wsgi.so". If you
+are using Apache 2.X the compiled Apache module can be found in the ".libs"
+subdirectory and is again called "mod_wsgi.so". The name of the file should
+be kept the same when copied into its appropriate location.
+
+To cleanup after installation, run:
+
+ make clean
+
+If you need to build the module for a different version of Apache, you
+should run:
+
+ make distclean
+
+and then rerun "configure" against the alternate version of Apache before
+attempting to run "make" again.
+
+
+Apache Configuration
+====================
+
+Once the Apache module has been installed into your Apache installation's
+module directory, it is still necessary to configure Apache to actually
+load the module.
+
+Exactly how this is done and in which of the main Apache configuration
+files it should be placed, is dependent on which version of Apache you are
+using and may also be influenced by how your operating system's Apache
+distribution has organised the Apache configuration files. You may
+therefore need to check with any documentation for your operating system to
+see in what way the procedure may need to be modified.
+
+In the simplest case, all that is required is to add a line of the form:
+
+ LoadModule wsgi_module modules/mod_wsgi.so
+
+into the main Apache "httpd.conf" configuration file at the same point that
+other Apache modules are being loaded. The last option to the directive
+should either be an absolute path to where the mod_wsgi module file is
+located, or a path expressed relative to the root of your Apache
+installation. If you used "make" to install the package, see where it
+copied the file to work out what to set this value to.
+
+With Apache 1.3, it would also be necessary to add a line of the form:
+
+ AddModule mod_wsgi.c
+
+If you wish to use mod_python at the same time as mod_wsgi, then you must
+be using a version of Python which has been configured and compiled so as
+to generate a shared library for Python. If you do not do this and either
+mod_python or mod_wsgi are compiled against a static library for Python,
+it is likely that either mod_python or mod_wsgi will crash Apache when
+used.
+
+Note that this is not the fault of either mod_python or mod_wsgi but arises
+purely because your Python installation isn't using a shared library for
+the Python library. The result of such a configuration means that there are
+actually two copies of the Python static library objects in memory at the
+same time and this can cause problems. Linux distributions where this is
+known to be a problem are any of the RedHat derived distributions. Other
+distributions such as Ubuntu do not have a problem as they use a shared
+library for the Python library.
+
+Having added the required directives you should perform a restart of
+Apache to check everything is okay.
+
+ apachectl restart
+
+If you see any sort of problem, or if you are upgrading from an older
+version of mod_wsgi, it is recommended you actually stop/start Apache
+instead.
+
+ apachectl stop
+ apachectl start
+
+If all is okay, you should see a line of the form:
+
+ Apache/2.2.2 (Unix) mod_wsgi/1.0 Python/2.3.5 configured
+
+in the Apache error log file.
+
+If Apache is configured to also load mod_python, it would instead be:
+
+ Apache/2.2.2 (Unix) mod_python/3.3.1 Python/2.3.5 mod_wsgi/1.0 configured
+
+That "Python" is listed before "mod_wsgi" is indicative of the fact that
+when both modules are being loaded, mod_wsgi will leave it up to mod_python
+to initialise Python.
+
+Note that mod_wsgi logs various detailed information about interpreter
+creation, script loading and reloading etc, but it logs with log level of
+'info'. As the default for the Apache LogLevel directive is usually 'warn',
+such information will not be displayed in the Apache error log file. If you
+are new to mod_wsgi or need to debug issues with its use, it is recommended
+to change the Apache LogLevel directive to 'info' so that the information
+is displayed. For example:
+
+ LogLevel info
+
+If only wishing to enable this level of log information for a single
+VirtualHost this can be done by specifying the directive in the context of
+the VirtualHost container, but by doing so, only the more detailed
+information which is specific to a request against that virtual host will
+be output.
+
+
+Enabling WSGI Application
+=========================
+
+The mechanisms used to configure mod_wsgi are similar to that used by the
+Apache mod_cgi module to enable traditional CGI scripts.
+
+For example, whereas mod_cgi has the ScriptAlias directive, mod_wsgi has
+the WSGIScriptAlias directive. Like with mod_cgi, this directive can only
+appear in the main Apache configuration files. The directive should be used
+within the VirtualHost container and cannot be used within either of the
+Location, Directory or Files container directives, neither can it be used
+within a ".htaccess" file.
+
+The first way of using the WSGIScriptAlias directive to indicate the WSGI
+application to be used, is to associate a WSGI application against a specific
+URL prefix.
+
+ WSGIScriptAlias /myapp /usr/local/wsgi/scripts/myapp.wsgi
+
+The last option to the directive in this case must be a full pathname to
+the actual code file containing the WSGI application. The WSGI application
+contained within the code file should be called "application". For example:
+
+ def application(environ, start_response):
+ status = '200 OK'
+ output = 'Hello World!'
+
+ response_headers = [('Content-type', 'text/plain'),
+ ('Content-Length', str(len(output)))]
+ start_response(status, response_headers)
+
+ return [output]
+
+Note that an absolute pathname must be used. It is not possible to specify
+an application by Python module name alone. A full path is used for a
+number of reasons, the main one being so that all the Apache access
+controls can still be applied to indicate who can actually access the WSGI
+application. Because these access controls will apply, if the WSGI
+application is located outside of any directories already known to Apache,
+it will be necessary to tell Apache that files within that directory can be
+used. To do this the Directory directive must be used.
+
+ <Directory /usr/local/wsgi/scripts>
+ Order allow,deny
+ Allow from all
+ </Directory>
+
+The second way of using the WSGIScriptAlias directive is to use it to map
+to a directory containing any number of WSGI applications.
+
+ WSGIScriptAlias /wsgi/ /usr/local/wsgi/scripts/
+
+When this is used, the next part of the URL after the URL prefix is used
+to identify which WSGI application code file within the target directory
+should be used.
+
+Note that by default each application is placed into its own distinct
+application group. This means that each application will be given its own
+distinct Python sub interpreter to run code within. Although this means
+that applications will be isolated and cannot interfere with the Python code
+components of each other, each will load its own copy of all Python modules
+it requires into memory. If you have many applications and they use a lot
+of different Python modules this can result in large process sizes.
+
+To avoid large process sizes, if you know that applications within a
+directory can safely coexist and run together within the same Python sub
+interpreter, you can specify that all applications within a certain context
+should be placed in the same application group. This is indicated by using
+the WSGIApplicationGroup directive. The argument to the directive can be
+any unique name of your choosing.
+
+ <Directory /usr/local/wsgi/scripts>
+ WSGIApplicationGroup admin-scripts
+ Order allow,deny
+ Allow from all
+ </Directory>
+
+The above only begins to describe the different ways in which mod_wsgi can
+be enabled for use. It does not cover issues such as reloading of
+application script files, additional means of managing Python sub
+interpreters, or how to supply configuration information to WSGI
+applications.
+
+For a more detailed explanation of how to use mod_wsgi, consult the
+documentation found at:
+
+ http://www.modwsgi.org
+
+If you have questions specifically about mod_wsgi and its configuration and
+use, use the Google discussion group at:
+
+ http://groups.google.com/group/modwsgi
+
+If your questions are about WSGI in general, use the Python Web-SIG or
+comp.lang.python USENET group:
+
+ http://groups.google.com/group/python-web-sig
+ http://groups.google.com/group/comp.lang.python
+
+Enjoy.
+
+Graham Dumpleton
diff --git a/configure b/configure
new file mode 100755
index 0000000..024d1b3
--- /dev/null
+++ b/configure
@@ -0,0 +1,2304 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.59.
+#
+# Copyright (C) 2003 Free Software Foundation, Inc.
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# Work around bugs in pre-3.0 UWIN ksh.
+$as_unset ENV MAIL MAILPATH
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)$' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
+ /^X\/\(\/\/\)$/{ s//\1/; q; }
+ /^X\/\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+
+
+# PATH needs CR, and LINENO needs CR and PATH.
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" || {
+ # Find who we are. Look in the path if we contain no path at all
+ # relative or not.
+ case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+
+ ;;
+ esac
+ # We did not find ourselves, most probably we were run as `sh COMMAND'
+ # in which case we are not to be found in the path.
+ if test "x$as_myself" = x; then
+ as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+ { echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2
+ { (exit 1); exit 1; }; }
+ fi
+ case $CONFIG_SHELL in
+ '')
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for as_base in sh bash ksh sh5; do
+ case $as_dir in
+ /*)
+ if ("$as_dir/$as_base" -c '
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then
+ $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
+ $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
+ CONFIG_SHELL=$as_dir/$as_base
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" ${1+"$@"}
+ fi;;
+ esac
+ done
+done
+;;
+ esac
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line before each line; the second 'sed' does the real
+ # work. The second script uses 'N' to pair each line-number line
+ # with the numbered line, and appends trailing '-' during
+ # substitution so that $LINENO is not a special case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)
+ sed '=' <$as_myself |
+ sed '
+ N
+ s,$,-,
+ : loop
+ s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+ t loop
+ s,-$,,
+ s,^['$as_cr_digits']*\n,,
+ ' >$as_me.lineno &&
+ chmod +x $as_me.lineno ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensible to this).
+ . ./$as_me.lineno
+ # Exit status is that of the last command.
+ exit
+}
+
+
+case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
+ *c*,-n*) ECHO_N= ECHO_C='
+' ECHO_T=' ' ;;
+ *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
+ *) ECHO_N= ECHO_C='\c' ECHO_T= ;;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ # We could just check for DJGPP; but this test a) works b) is more generic
+ # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
+ if test -f conf$$.exe; then
+ # Don't use ln at all; we don't have any links
+ as_ln_s='cp -p'
+ else
+ as_ln_s='ln -s'
+ fi
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.file
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+as_executable_p="test -f"
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.
+as_nl='
+'
+IFS=" $as_nl"
+
+# CDPATH.
+$as_unset CDPATH
+
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+exec 6>&1
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_config_libobj_dir=.
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+# Maximum number of lines to put in a shell here document.
+# This variable seems obsolete. It should probably be removed, and
+# only ac_max_sed_lines should be used.
+: ${ac_max_here_lines=38}
+
+# Identity of this package.
+PACKAGE_NAME=
+PACKAGE_TARNAME=
+PACKAGE_VERSION=
+PACKAGE_STRING=
+PACKAGE_BUGREPORT=
+
+ac_unique_file="mod_wsgi.c"
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS APXS PYTHON CPPFLAGS LDFLAGS LDLIBS LIBOBJS LTLIBOBJS'
+ac_subst_files=''
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datadir='${prefix}/share'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+libdir='${exec_prefix}/lib'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+infodir='${prefix}/info'
+mandir='${prefix}/man'
+
+ac_prev=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval "$ac_prev=\$ac_option"
+ ac_prev=
+ continue
+ fi
+
+ ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'`
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_option in
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad | --data | --dat | --da)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
+ | --da=*)
+ datadir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ eval "enable_$ac_feature=no" ;;
+
+ -enable-* | --enable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ case $ac_option in
+ *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "enable_$ac_feature='$ac_optarg'" ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst \
+ | --locals | --local | --loca | --loc | --lo)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* \
+ | --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package| sed 's/-/_/g'`
+ case $ac_option in
+ *=*) ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) ac_optarg=yes ;;
+ esac
+ eval "with_$ac_package='$ac_optarg'" ;;
+
+ -without-* | --without-*)
+ ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package | sed 's/-/_/g'`
+ eval "with_$ac_package=no" ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) { echo "$as_me: error: unrecognized option: $ac_option
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; }
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
+ { (exit 1); exit 1; }; }
+ ac_optarg=`echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"`
+ eval "$ac_envvar='$ac_optarg'"
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ { echo "$as_me: error: missing argument to $ac_option" >&2
+ { (exit 1); exit 1; }; }
+fi
+
+# Be sure to have absolute paths.
+for ac_var in exec_prefix prefix
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* | NONE | '' ) ;;
+ *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# Be sure to have absolute paths.
+for ac_var in bindir sbindir libexecdir datadir sysconfdir sharedstatedir \
+ localstatedir libdir includedir oldincludedir infodir mandir
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) ;;
+ *) { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used." >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then its parent.
+ ac_confdir=`(dirname "$0") 2>/dev/null ||
+$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$0" : 'X\(//\)[^/]' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$0" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r $srcdir/$ac_unique_file; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r $srcdir/$ac_unique_file; then
+ if test "$ac_srcdir_defaulted" = yes; then
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $ac_confdir or .." >&2
+ { (exit 1); exit 1; }; }
+ else
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
+ { (exit 1); exit 1; }; }
+ fi
+fi
+(cd $srcdir && test -r ./$ac_unique_file) 2>/dev/null ||
+ { echo "$as_me: error: sources are in $srcdir, but \`cd $srcdir' does not work" >&2
+ { (exit 1); exit 1; }; }
+srcdir=`echo "$srcdir" | sed 's%\([^\\/]\)[\\/]*$%\1%'`
+ac_env_build_alias_set=${build_alias+set}
+ac_env_build_alias_value=$build_alias
+ac_cv_env_build_alias_set=${build_alias+set}
+ac_cv_env_build_alias_value=$build_alias
+ac_env_host_alias_set=${host_alias+set}
+ac_env_host_alias_value=$host_alias
+ac_cv_env_host_alias_set=${host_alias+set}
+ac_cv_env_host_alias_value=$host_alias
+ac_env_target_alias_set=${target_alias+set}
+ac_env_target_alias_value=$target_alias
+ac_cv_env_target_alias_set=${target_alias+set}
+ac_cv_env_target_alias_value=$target_alias
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures this package to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+_ACEOF
+
+ cat <<_ACEOF
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --datadir=DIR read-only architecture-independent data [PREFIX/share]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --infodir=DIR info documentation [PREFIX/info]
+ --mandir=DIR man documentation [PREFIX/man]
+_ACEOF
+
+ cat <<\_ACEOF
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+
+ cat <<\_ACEOF
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-apxs=NAME name of the apxs executable [apxs]
+ --with-python=NAME name of the python executable [python]
+
+_ACEOF
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ ac_popdir=`pwd`
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d $ac_dir || continue
+ ac_builddir=.
+
+if test "$ac_dir" != .; then
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
+else
+ ac_dir_suffix= ac_top_builddir=
+fi
+
+case $srcdir in
+ .) # No --srcdir option. We are building in place.
+ ac_srcdir=.
+ if test -z "$ac_top_builddir"; then
+ ac_top_srcdir=.
+ else
+ ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
+ fi ;;
+ [\\/]* | ?:[\\/]* ) # Absolute path.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir ;;
+ *) # Relative path.
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_builddir$srcdir ;;
+esac
+
+# Do not use `cd foo && pwd` to compute absolute paths, because
+# the directories may not exist.
+case `pwd` in
+.) ac_abs_builddir="$ac_dir";;
+*)
+ case "$ac_dir" in
+ .) ac_abs_builddir=`pwd`;;
+ [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";;
+ *) ac_abs_builddir=`pwd`/"$ac_dir";;
+ esac;;
+esac
+case $ac_abs_builddir in
+.) ac_abs_top_builddir=${ac_top_builddir}.;;
+*)
+ case ${ac_top_builddir}. in
+ .) ac_abs_top_builddir=$ac_abs_builddir;;
+ [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;;
+ *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;;
+ esac;;
+esac
+case $ac_abs_builddir in
+.) ac_abs_srcdir=$ac_srcdir;;
+*)
+ case $ac_srcdir in
+ .) ac_abs_srcdir=$ac_abs_builddir;;
+ [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;;
+ *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;;
+ esac;;
+esac
+case $ac_abs_builddir in
+.) ac_abs_top_srcdir=$ac_top_srcdir;;
+*)
+ case $ac_top_srcdir in
+ .) ac_abs_top_srcdir=$ac_abs_builddir;;
+ [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;;
+ *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;;
+ esac;;
+esac
+
+ cd $ac_dir
+ # Check for guested configure; otherwise get Cygnus style configure.
+ if test -f $ac_srcdir/configure.gnu; then
+ echo
+ $SHELL $ac_srcdir/configure.gnu --help=recursive
+ elif test -f $ac_srcdir/configure; then
+ echo
+ $SHELL $ac_srcdir/configure --help=recursive
+ elif test -f $ac_srcdir/configure.ac ||
+ test -f $ac_srcdir/configure.in; then
+ echo
+ $ac_configure --help
+ else
+ echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi
+ cd $ac_popdir
+ done
+fi
+
+test -n "$ac_init_help" && exit 0
+if $ac_init_version; then
+ cat <<\_ACEOF
+
+Copyright (C) 2003 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit 0
+fi
+exec 5>config.log
+cat >&5 <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by $as_me, which was
+generated by GNU Autoconf 2.59. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+hostinfo = `(hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ echo "PATH: $as_dir"
+done
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_sep=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
+ ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 2)
+ ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ ac_configure_args="$ac_configure_args$ac_sep'$ac_arg'"
+ # Get rid of the leading space.
+ ac_sep=" "
+ ;;
+ esac
+ done
+done
+$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
+$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Be sure not to use single quotes in there, as some shells,
+# such as our DU 5.0 friend, will then `close' the trap.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+ echo
+  # The following way of writing the cache mishandles newlines in values.
+{
+ (set) 2>&1 |
+ case `(ac_space='"'"' '"'"'; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ sed -n \
+ "s/'"'"'/'"'"'\\\\'"'"''"'"'/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='"'"'\\2'"'"'/p"
+ ;;
+ *)
+ sed -n \
+ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p"
+ ;;
+ esac;
+}
+ echo
+
+ cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=$`echo $ac_var`
+ echo "$ac_var='"'"'$ac_val'"'"'"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ cat <<\_ASBOX
+## ------------- ##
+## Output files. ##
+## ------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=$`echo $ac_var`
+ echo "$ac_var='"'"'$ac_val'"'"'"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+ echo
+ sed "/^$/d" confdefs.h | sort
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ echo "$as_me: caught signal $ac_signal"
+ echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core &&
+ rm -rf conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+ ' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -rf conftest* confdefs.h
+# AIX cpp loses on an empty file, so make sure it contains at least a newline.
+echo >confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer explicitly selected file to automatically selected ones.
+if test -z "$CONFIG_SITE"; then
+ if test "x$prefix" != xNONE; then
+ CONFIG_SITE="$prefix/share/config.site $prefix/etc/config.site"
+ else
+ CONFIG_SITE="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
+ fi
+fi
+for ac_site_file in $CONFIG_SITE; do
+ if test -r "$ac_site_file"; then
+ { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
+echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { echo "$as_me:$LINENO: loading cache $cache_file" >&5
+echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . $cache_file;;
+ *) . ./$cache_file;;
+ esac
+ fi
+else
+ { echo "$as_me:$LINENO: creating cache $cache_file" >&5
+echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in `(set) 2>&1 |
+ sed -n 's/^ac_env_\([a-zA-Z_0-9]*\)_set=.*/\1/p'`; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val="\$ac_cv_env_${ac_var}_value"
+ eval ac_new_val="\$ac_env_${ac_var}_value"
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
+echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ { echo "$as_me:$LINENO: former value: $ac_old_val" >&5
+echo "$as_me: former value: $ac_old_val" >&2;}
+ { echo "$as_me:$LINENO: current value: $ac_new_val" >&5
+echo "$as_me: current value: $ac_new_val" >&2;}
+ ac_cache_corrupted=:
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?\"\']*)
+ ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
+echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
+echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-apxs or --without-apxs was given.
+if test "${with_apxs+set}" = set; then
+ withval="$with_apxs"
+ APXS="$with_apxs"
+fi;
+
+if test -z "${APXS}"; then
+ for ac_prog in apxs2 apxs
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_path_APXS+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $APXS in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_APXS="$APXS" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_dummy="$PATH:/usr/local/apache/bin:/usr/sbin"
+for as_dir in $as_dummy
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_APXS="$as_dir/$ac_word$ac_exec_ext"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ ;;
+esac
+fi
+APXS=$ac_cv_path_APXS
+
+if test -n "$APXS"; then
+ echo "$as_me:$LINENO: result: $APXS" >&5
+echo "${ECHO_T}$APXS" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$APXS" && break
+done
+test -n "$APXS" || APXS="apxs"
+
+fi
+
+
+
+
+# Check whether --with-python or --without-python was given.
+if test "${with_python+set}" = set; then
+ withval="$with_python"
+ PYTHON="$with_python"
+fi;
+
+if test -z "${PYTHON}"; then
+ for ac_prog in python
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6
+if test "${ac_cv_path_PYTHON+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ case $PYTHON in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_dummy="$PATH:/usr/local/bin"
+for as_dir in $as_dummy
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+
+ ;;
+esac
+fi
+PYTHON=$ac_cv_path_PYTHON
+
+if test -n "$PYTHON"; then
+ echo "$as_me:$LINENO: result: $PYTHON" >&5
+echo "${ECHO_T}$PYTHON" >&6
+else
+ echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+
+ test -n "$PYTHON" && break
+done
+test -n "$PYTHON" || PYTHON="python"
+
+fi
+
+
+
+PYTHON_VERSION=`${PYTHON} -c 'from distutils import sysconfig; \
+ print sysconfig.get_config_var("VERSION")'`
+
+CPPFLAGS1=`${PYTHON} -c 'from distutils import sysconfig; \
+ print "-I" + sysconfig.get_config_var("INCLUDEPY")'`
+
+CPPFLAGS2=`${PYTHON} -c 'from distutils import sysconfig; \
+ print " ".join(filter(lambda x: x.startswith("-D"), \
+ sysconfig.get_config_var("CFLAGS").split()))'`
+
+CPPFLAGS="${CPPFLAGS1} ${CPPFLAGS2}"
+
+
+
+PYTHONFRAMEWORKDIR=`${PYTHON} -c 'from distutils import sysconfig; \
+ print sysconfig.get_config_var("PYTHONFRAMEWORKDIR")'`
+PYTHONFRAMEWORK=`${PYTHON} -c 'from distutils import sysconfig; \
+ print sysconfig.get_config_var("PYTHONFRAMEWORK")'`
+
+if test "${PYTHONFRAMEWORKDIR}" = "no-framework"; then
+ LDFLAGS=`${PYTHON} -c 'import distutils.sysconfig; \
+ print "-L" + distutils.sysconfig.get_python_lib(plat_specific=1, \
+ standard_lib=1) +"/config"'`
+
+ LDLIBS1="-lpython${PYTHON_VERSION}"
+ LDLIBS2=`${PYTHON} -c 'from distutils import sysconfig; \
+ print sysconfig.get_config_var("LIBS")'`
+
+ LDLIBS="${LDLIBS1} ${LDLIBS2}"
+else
+ LDFLAGS1="-framework ${PYTHONFRAMEWORK}"
+
+ VERSION="${PYTHON_VERSION}"
+ STRING="${PYTHONFRAMEWORKDIR}/Versions/${VERSION}/${PYTHONFRAMEWORK}"
+ LDFLAGS2=`${PYTHON} -c "from distutils import sysconfig; \
+ print sysconfig.get_config_var(\"LINKFORSHARED\").replace( \
+ \"${STRING}\", '')"`
+
+ LDFLAGS="${LDFLAGS1} ${LDFLAGS2}"
+
+ LDLIBS=`${PYTHON} -c 'from distutils import sysconfig; \
+ print sysconfig.get_config_var("LIBS")'`
+fi
+
+
+
+
+echo "$as_me:$LINENO: checking Apache version" >&5
+echo $ECHO_N "checking Apache version... $ECHO_C" >&6
+HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`"
+HTTPD_VERSION=`$HTTPD -v | awk '/version/ {print $3}' | awk -F/ '{print $2}'`
+echo "$as_me:$LINENO: result: $HTTPD_VERSION" >&5
+echo "${ECHO_T}$HTTPD_VERSION" >&6
+
+HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'`
+
+rm -f Makefile.in
+ln -s Makefile-${HTTPD_MAJOR_VERSION}.X.in Makefile.in
+
+ ac_config_files="$ac_config_files Makefile"
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, don't put newlines in cache variables' values.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+{
+ (set) 2>&1 |
+ case `(ac_space=' '; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;;
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n \
+ "s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1=\\2/p"
+ ;;
+ esac;
+} |
+ sed '
+ t clear
+ : clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ /^ac_cv_env/!s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ : end' >>confcache
+if diff $cache_file confcache >/dev/null 2>&1; then :; else
+ if test -w $cache_file; then
+ test "x$cache_file" != "x/dev/null" && echo "updating cache $cache_file"
+ cat confcache >$cache_file
+ else
+ echo "not updating unwritable cache $cache_file"
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/;
+s/:*\${srcdir}:*/:/;
+s/:*@srcdir@:*/:/;
+s/^\([^=]*=[ ]*\):*/\1/;
+s/:*$//;
+s/^[^=]*=[ ]*$//;
+}'
+fi
+
+# Transform confdefs.h into DEFS.
+# Protect against shell expansion while executing Makefile rules.
+# Protect against Makefile macro expansion.
+#
+# If the first sed substitution is executed (which looks for macros that
+# take arguments), then we branch to the quote section. Otherwise,
+# look for a macro that doesn't take arguments.
+cat >confdef2opt.sed <<\_ACEOF
+t clear
+: clear
+s,^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\),-D\1=\2,g
+t quote
+s,^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\),-D\1=\2,g
+t quote
+d
+: quote
+s,[ `~#$^&*(){}\\|;'"<>?],\\&,g
+s,\[,\\&,g
+s,\],\\&,g
+s,\$,$$,g
+p
+_ACEOF
+# We use echo to avoid assuming a particular line-breaking character.
+# The extra dot is to prevent the shell from consuming trailing
+# line-breaks from the sub-command output. A line-break within
+# single-quotes doesn't work because, if this script is created in a
+# platform that uses two characters for line-breaks (e.g., DOS), tr
+# would break.
+ac_LF_and_DOT=`echo; echo .`
+DEFS=`sed -n -f confdef2opt.sed confdefs.h | tr "$ac_LF_and_DOT" ' .'`
+rm -f confdef2opt.sed
+
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_i=`echo "$ac_i" |
+ sed 's/\$U\././;s/\.o$//;s/\.obj$//'`
+ # 2. Add them.
+ ac_libobjs="$ac_libobjs $ac_i\$U.$ac_objext"
+ ac_ltlibobjs="$ac_ltlibobjs $ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
+: ${CONFIG_STATUS=./config.status}
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
+echo "$as_me: creating $CONFIG_STATUS" >&6;}
+cat >$CONFIG_STATUS <<_ACEOF
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+elif test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# Work around bugs in pre-3.0 UWIN ksh.
+$as_unset ENV MAIL MAILPATH
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename /) >/dev/null 2>&1 && test "X`basename / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)$' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/; q; }
+ /^X\/\(\/\/\)$/{ s//\1/; q; }
+ /^X\/\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+
+
+# PATH needs CR, and LINENO needs CR and PATH.
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" || {
+ # Find who we are. Look in the path if we contain no path at all
+ # relative or not.
+ case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+
+ ;;
+ esac
+ # We did not find ourselves, most probably we were run as `sh COMMAND'
+ # in which case we are not to be found in the path.
+ if test "x$as_myself" = x; then
+ as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+ { { echo "$as_me:$LINENO: error: cannot find myself; rerun with an absolute path" >&5
+echo "$as_me: error: cannot find myself; rerun with an absolute path" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ case $CONFIG_SHELL in
+ '')
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for as_base in sh bash ksh sh5; do
+ case $as_dir in
+ /*)
+ if ("$as_dir/$as_base" -c '
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ as_lineno_3=`(expr $as_lineno_1 + 1) 2>/dev/null`
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x$as_lineno_3" = "x$as_lineno_2" ') 2>/dev/null; then
+ $as_unset BASH_ENV || test "${BASH_ENV+set}" != set || { BASH_ENV=; export BASH_ENV; }
+ $as_unset ENV || test "${ENV+set}" != set || { ENV=; export ENV; }
+ CONFIG_SHELL=$as_dir/$as_base
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$0" ${1+"$@"}
+ fi;;
+ esac
+ done
+done
+;;
+ esac
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line before each line; the second 'sed' does the real
+ # work. The second script uses 'N' to pair each line-number line
+ # with the numbered line, and appends trailing '-' during
+ # substitution so that $LINENO is not a special case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # second 'sed' script. Blame Lee E. McMahon for sed's syntax. :-)
+ sed '=' <$as_myself |
+ sed '
+ N
+ s,$,-,
+ : loop
+ s,^\(['$as_cr_digits']*\)\(.*\)[$]LINENO\([^'$as_cr_alnum'_]\),\1\2\1\3,
+ t loop
+ s,-$,,
+ s,^['$as_cr_digits']*\n,,
+ ' >$as_me.lineno &&
+ chmod +x $as_me.lineno ||
+ { { echo "$as_me:$LINENO: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&5
+echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2;}
+ { (exit 1); exit 1; }; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+  # (the dirname of $[0] is not the place where we might find the
+  # original and so on.  Autoconf is especially sensitive to this).
+ . ./$as_me.lineno
+ # Exit status is that of the last command.
+ exit
+}
+
+
+case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
+ *c*,-n*) ECHO_N= ECHO_C='
+' ECHO_T=' ' ;;
+ *c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
+ *) ECHO_N= ECHO_C='\c' ECHO_T= ;;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ # We could just check for DJGPP; but this test a) works b) is more generic
+ # and c) will remain valid once DJGPP supports symlinks (DJGPP 2.04).
+ if test -f conf$$.exe; then
+ # Don't use ln at all; we don't have any links
+ as_ln_s='cp -p'
+ else
+ as_ln_s='ln -s'
+ fi
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.file
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+as_executable_p="test -f"
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.
+as_nl='
+'
+IFS=" $as_nl"
+
+# CDPATH.
+$as_unset CDPATH
+
+exec 6>&1
+
+# Open the log real soon, to keep \$[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling. Logging --version etc. is OK.
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+} >&5
+cat >&5 <<_CSEOF
+
+This file was extended by $as_me, which was
+generated by GNU Autoconf 2.59. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+_CSEOF
+echo "on `(hostname || uname -n) 2>/dev/null | sed 1q`" >&5
+echo >&5
+_ACEOF
+
+# Files that config.status was made for.
+if test -n "$ac_config_files"; then
+ echo "config_files=\"$ac_config_files\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_headers"; then
+ echo "config_headers=\"$ac_config_headers\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_links"; then
+ echo "config_links=\"$ac_config_links\"" >>$CONFIG_STATUS
+fi
+
+if test -n "$ac_config_commands"; then
+ echo "config_commands=\"$ac_config_commands\"" >>$CONFIG_STATUS
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTIONS] [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+
+Configuration files:
+$config_files
+
+Report bugs to <bug-autoconf@gnu.org>."
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+ac_cs_version="\\
+config.status
+configured by $0, generated by GNU Autoconf 2.59,
+ with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright (C) 2003 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+srcdir=$srcdir
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If no files are specified by the user, then we need to provide a default
+# value. But we need to know if files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "x$1" : 'x\([^=]*\)='`
+ ac_optarg=`expr "x$1" : 'x[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ -*)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ *) # This is not an option, so the user has probably given explicit
+ # arguments.
+ ac_option=$1
+ ac_need_defaults=false;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --vers* | -V )
+ echo "$ac_cs_version"; exit 0 ;;
+ --he | --h)
+ # Conflict between --help and --header
+ { { echo "$as_me:$LINENO: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ echo "$ac_cs_usage"; exit 0 ;;
+ --debug | --d* | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+ ac_need_defaults=false;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { { echo "$as_me:$LINENO: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&5
+echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2;}
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1" ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+if \$ac_cs_recheck; then
+ echo "running $SHELL $0 " $ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
+ exec $SHELL $0 $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+fi
+
+_ACEOF
+
+
+
+
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+for ac_config_target in $ac_config_targets
+do
+ case "$ac_config_target" in
+ # Handling of arguments.
+ "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason to put it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Create a temporary directory, and hook for its removal unless debugging.
+$debug ||
+{
+ trap 'exit_status=$?; rm -rf $tmp && exit $exit_status' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d -q "./confstatXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./confstat$$-$RANDOM
+ (umask 077 && mkdir $tmp)
+} ||
+{
+ echo "$me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+
+#
+# CONFIG_FILES section.
+#
+
+# No need to generate the scripts if there are no CONFIG_FILES.
+# This happens for instance when ./config.status config.h
+if test -n "\$CONFIG_FILES"; then
+ # Protect against being on the right side of a sed subst in config.status.
+ sed 's/,@/@@/; s/@,/@@/; s/,;t t\$/@;t t/; /@;t t\$/s/[\\\\&,]/\\\\&/g;
+ s/@@/,@/; s/@@/@,/; s/@;t t\$/,;t t/' >\$tmp/subs.sed <<\\CEOF
+s,@SHELL@,$SHELL,;t t
+s,@PATH_SEPARATOR@,$PATH_SEPARATOR,;t t
+s,@PACKAGE_NAME@,$PACKAGE_NAME,;t t
+s,@PACKAGE_TARNAME@,$PACKAGE_TARNAME,;t t
+s,@PACKAGE_VERSION@,$PACKAGE_VERSION,;t t
+s,@PACKAGE_STRING@,$PACKAGE_STRING,;t t
+s,@PACKAGE_BUGREPORT@,$PACKAGE_BUGREPORT,;t t
+s,@exec_prefix@,$exec_prefix,;t t
+s,@prefix@,$prefix,;t t
+s,@program_transform_name@,$program_transform_name,;t t
+s,@bindir@,$bindir,;t t
+s,@sbindir@,$sbindir,;t t
+s,@libexecdir@,$libexecdir,;t t
+s,@datadir@,$datadir,;t t
+s,@sysconfdir@,$sysconfdir,;t t
+s,@sharedstatedir@,$sharedstatedir,;t t
+s,@localstatedir@,$localstatedir,;t t
+s,@libdir@,$libdir,;t t
+s,@includedir@,$includedir,;t t
+s,@oldincludedir@,$oldincludedir,;t t
+s,@infodir@,$infodir,;t t
+s,@mandir@,$mandir,;t t
+s,@build_alias@,$build_alias,;t t
+s,@host_alias@,$host_alias,;t t
+s,@target_alias@,$target_alias,;t t
+s,@DEFS@,$DEFS,;t t
+s,@ECHO_C@,$ECHO_C,;t t
+s,@ECHO_N@,$ECHO_N,;t t
+s,@ECHO_T@,$ECHO_T,;t t
+s,@LIBS@,$LIBS,;t t
+s,@APXS@,$APXS,;t t
+s,@PYTHON@,$PYTHON,;t t
+s,@CPPFLAGS@,$CPPFLAGS,;t t
+s,@LDFLAGS@,$LDFLAGS,;t t
+s,@LDLIBS@,$LDLIBS,;t t
+s,@LIBOBJS@,$LIBOBJS,;t t
+s,@LTLIBOBJS@,$LTLIBOBJS,;t t
+CEOF
+
+_ACEOF
+
+ cat >>$CONFIG_STATUS <<\_ACEOF
+ # Split the substitutions into bite-sized pieces for seds with
+ # small command number limits, like on Digital OSF/1 and HP-UX.
+ ac_max_sed_lines=48
+ ac_sed_frag=1 # Number of current file.
+ ac_beg=1 # First line for current file.
+ ac_end=$ac_max_sed_lines # Line after last line for current file.
+ ac_more_lines=:
+ ac_sed_cmds=
+ while $ac_more_lines; do
+ if test $ac_beg -gt 1; then
+ sed "1,${ac_beg}d; ${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ else
+ sed "${ac_end}q" $tmp/subs.sed >$tmp/subs.frag
+ fi
+ if test ! -s $tmp/subs.frag; then
+ ac_more_lines=false
+ else
+ # The purpose of the label and of the branching condition is to
+ # speed up the sed processing (if there are no `@' at all, there
+ # is no need to browse any of the substitutions).
+ # These are the two extra sed commands mentioned above.
+ (echo ':t
+ /@[a-zA-Z_][a-zA-Z_0-9]*@/!b' && cat $tmp/subs.frag) >$tmp/subs-$ac_sed_frag.sed
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds="sed -f $tmp/subs-$ac_sed_frag.sed"
+ else
+ ac_sed_cmds="$ac_sed_cmds | sed -f $tmp/subs-$ac_sed_frag.sed"
+ fi
+ ac_sed_frag=`expr $ac_sed_frag + 1`
+ ac_beg=$ac_end
+ ac_end=`expr $ac_end + $ac_max_sed_lines`
+ fi
+ done
+ if test -z "$ac_sed_cmds"; then
+ ac_sed_cmds=cat
+ fi
+fi # test -n "$CONFIG_FILES"
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+for ac_file in : $CONFIG_FILES; do test "x$ac_file" = x: && continue
+ # Support "outfile[:infile[:infile...]]", defaulting infile="outfile.in".
+ case $ac_file in
+ - | *:- | *:-:* ) # input from stdin
+ cat >$tmp/stdin
+ ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ *:* ) ac_file_in=`echo "$ac_file" | sed 's,[^:]*:,,'`
+ ac_file=`echo "$ac_file" | sed 's,:.*,,'` ;;
+ * ) ac_file_in=$ac_file.in ;;
+ esac
+
+ # Compute @srcdir@, @top_srcdir@, and @INSTALL@ for subdirectories.
+ ac_dir=`(dirname "$ac_file") 2>/dev/null ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ { if $as_mkdir_p; then
+ mkdir -p "$ac_dir"
+ else
+ as_dir="$ac_dir"
+ as_dirs=
+ while test ! -d "$as_dir"; do
+ as_dirs="$as_dir $as_dirs"
+ as_dir=`(dirname "$as_dir") 2>/dev/null ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| \
+ . : '\(.\)' 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/; q; }
+ /^X\(\/\/\)[^/].*/{ s//\1/; q; }
+ /^X\(\/\/\)$/{ s//\1/; q; }
+ /^X\(\/\).*/{ s//\1/; q; }
+ s/.*/./; q'`
+ done
+ test ! -n "$as_dirs" || mkdir $as_dirs
+ fi || { { echo "$as_me:$LINENO: error: cannot create directory \"$ac_dir\"" >&5
+echo "$as_me: error: cannot create directory \"$ac_dir\"" >&2;}
+ { (exit 1); exit 1; }; }; }
+
+ ac_builddir=.
+
+if test "$ac_dir" != .; then
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A "../" for each directory in $ac_dir_suffix.
+ ac_top_builddir=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,../,g'`
+else
+ ac_dir_suffix= ac_top_builddir=
+fi
+
+case $srcdir in
+ .) # No --srcdir option. We are building in place.
+ ac_srcdir=.
+ if test -z "$ac_top_builddir"; then
+ ac_top_srcdir=.
+ else
+ ac_top_srcdir=`echo $ac_top_builddir | sed 's,/$,,'`
+ fi ;;
+ [\\/]* | ?:[\\/]* ) # Absolute path.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir ;;
+ *) # Relative path.
+ ac_srcdir=$ac_top_builddir$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_builddir$srcdir ;;
+esac
+
+# Do not use `cd foo && pwd` to compute absolute paths, because
+# the directories may not exist.
+case `pwd` in
+.) ac_abs_builddir="$ac_dir";;
+*)
+ case "$ac_dir" in
+ .) ac_abs_builddir=`pwd`;;
+ [\\/]* | ?:[\\/]* ) ac_abs_builddir="$ac_dir";;
+ *) ac_abs_builddir=`pwd`/"$ac_dir";;
+ esac;;
+esac
+case $ac_abs_builddir in
+.) ac_abs_top_builddir=${ac_top_builddir}.;;
+*)
+ case ${ac_top_builddir}. in
+ .) ac_abs_top_builddir=$ac_abs_builddir;;
+ [\\/]* | ?:[\\/]* ) ac_abs_top_builddir=${ac_top_builddir}.;;
+ *) ac_abs_top_builddir=$ac_abs_builddir/${ac_top_builddir}.;;
+ esac;;
+esac
+case $ac_abs_builddir in
+.) ac_abs_srcdir=$ac_srcdir;;
+*)
+ case $ac_srcdir in
+ .) ac_abs_srcdir=$ac_abs_builddir;;
+ [\\/]* | ?:[\\/]* ) ac_abs_srcdir=$ac_srcdir;;
+ *) ac_abs_srcdir=$ac_abs_builddir/$ac_srcdir;;
+ esac;;
+esac
+case $ac_abs_builddir in
+.) ac_abs_top_srcdir=$ac_top_srcdir;;
+*)
+ case $ac_top_srcdir in
+ .) ac_abs_top_srcdir=$ac_abs_builddir;;
+ [\\/]* | ?:[\\/]* ) ac_abs_top_srcdir=$ac_top_srcdir;;
+ *) ac_abs_top_srcdir=$ac_abs_builddir/$ac_top_srcdir;;
+ esac;;
+esac
+
+
+
+ if test x"$ac_file" != x-; then
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ rm -f "$ac_file"
+ fi
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ if test x"$ac_file" = x-; then
+ configure_input=
+ else
+ configure_input="$ac_file. "
+ fi
+ configure_input=$configure_input"Generated from `echo $ac_file_in |
+ sed 's,.*/,,'` by configure."
+
+ # First look for the input files in the build tree, otherwise in the
+ # src tree.
+ ac_file_inputs=`IFS=:
+ for f in $ac_file_in; do
+ case $f in
+ -) echo $tmp/stdin ;;
+ [\\/$]*)
+ # Absolute (can't be DOS-style, as IFS=:)
+ test -f "$f" || { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ echo "$f";;
+ *) # Relative
+ if test -f "$f"; then
+ # Build tree
+ echo "$f"
+ elif test -f "$srcdir/$f"; then
+ # Source tree
+ echo "$srcdir/$f"
+ else
+ # /dev/null tree
+ { { echo "$as_me:$LINENO: error: cannot find input file: $f" >&5
+echo "$as_me: error: cannot find input file: $f" >&2;}
+ { (exit 1); exit 1; }; }
+ fi;;
+ esac
+ done` || { (exit 1); exit 1; }
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+ sed "$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s,@configure_input@,$configure_input,;t t
+s,@srcdir@,$ac_srcdir,;t t
+s,@abs_srcdir@,$ac_abs_srcdir,;t t
+s,@top_srcdir@,$ac_top_srcdir,;t t
+s,@abs_top_srcdir@,$ac_abs_top_srcdir,;t t
+s,@builddir@,$ac_builddir,;t t
+s,@abs_builddir@,$ac_abs_builddir,;t t
+s,@top_builddir@,$ac_top_builddir,;t t
+s,@abs_top_builddir@,$ac_abs_top_builddir,;t t
+" $ac_file_inputs | (eval "$ac_sed_cmds") >$tmp/out
+ rm -f $tmp/stdin
+ if test x"$ac_file" != x-; then
+ mv $tmp/out $ac_file
+ else
+ cat $tmp/out
+ rm -f $tmp/out
+ fi
+
+done
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+{ (exit 0); exit 0; }
+_ACEOF
+chmod +x $CONFIG_STATUS
+ac_clean_files=$ac_clean_files_save
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || { (exit 1); exit 1; }
+fi
+
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..270fb03
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,100 @@
dnl vim: set sw=4 expandtab :
dnl
dnl Copyright 2007 GRAHAM DUMPLETON
dnl
dnl Licensed under the Apache License, Version 2.0 (the "License");
dnl you may not use this file except in compliance with the License.
dnl You may obtain a copy of the License at
dnl
dnl http://www.apache.org/licenses/LICENSE-2.0
dnl
dnl Unless required by applicable law or agreed to in writing, software
dnl distributed under the License is distributed on an "AS IS" BASIS,
dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
dnl See the License for the specific language governing permissions and
dnl limitations under the License.

dnl Process this file with autoconf to produce a configure script.

dnl Sanity check that we are in the mod_wsgi source directory.
AC_INIT(mod_wsgi.c)

dnl Locate the Apache 'apxs' tool used to compile and install the module.
dnl Prefer the value given via --with-apxs; otherwise search PATH plus a
dnl couple of common Apache install locations, trying 'apxs2' first.
AC_ARG_WITH(apxs, AC_HELP_STRING([--with-apxs=NAME],
            [name of the apxs executable [[apxs]]]),
            [APXS="$with_apxs"])

if test -z "${APXS}"; then
    AC_PATH_PROGS(APXS, apxs2 apxs, [apxs],
                  [$PATH:/usr/local/apache/bin:/usr/sbin])
fi

AC_SUBST(APXS)

dnl Locate the Python interpreter whose headers and libraries the module
dnl will be compiled and linked against.
AC_ARG_WITH(python, AC_HELP_STRING([--with-python=NAME],
            [name of the python executable [[python]]]),
            [PYTHON="$with_python"])

if test -z "${PYTHON}"; then
    AC_PATH_PROGS(PYTHON, python, [python],
                  [$PATH:/usr/local/bin])
fi

AC_SUBST(PYTHON)

dnl Interrogate distutils for the compile settings of the chosen Python.
dnl Note the Python 2 'print' statement is intentional here.
PYTHON_VERSION=`${PYTHON} -c 'from distutils import sysconfig; \
    print sysconfig.get_config_var("VERSION")'`

CPPFLAGS1=`${PYTHON} -c 'from distutils import sysconfig; \
    print "-I" + sysconfig.get_config_var("INCLUDEPY")'`

CPPFLAGS2=`${PYTHON} -c 'from distutils import sysconfig; \
    print " ".join(filter(lambda x: x.startswith("-D"), \
    sysconfig.get_config_var("CFLAGS").split()))'`

CPPFLAGS="${CPPFLAGS1} ${CPPFLAGS2}"

AC_SUBST(CPPFLAGS)

dnl Mac OS X framework builds of Python need '-framework' style link
dnl flags; non-framework builds link against libpythonX.Y directly.
PYTHONFRAMEWORKDIR=`${PYTHON} -c 'from distutils import sysconfig; \
    print sysconfig.get_config_var("PYTHONFRAMEWORKDIR")'`
PYTHONFRAMEWORK=`${PYTHON} -c 'from distutils import sysconfig; \
    print sysconfig.get_config_var("PYTHONFRAMEWORK")'`

if test "${PYTHONFRAMEWORKDIR}" = "no-framework"; then
    LDFLAGS=`${PYTHON} -c 'import distutils.sysconfig; \
        print "-L" + distutils.sysconfig.get_python_lib(plat_specific=1, \
        standard_lib=1) +"/config"'`

    LDLIBS1="-lpython${PYTHON_VERSION}"
    LDLIBS2=`${PYTHON} -c 'from distutils import sysconfig; \
        print sysconfig.get_config_var("LIBS")'`

    LDLIBS="${LDLIBS1} ${LDLIBS2}"
else
    LDFLAGS1="-framework ${PYTHONFRAMEWORK}"

    dnl Strip the framework binary path out of LINKFORSHARED; the
    dnl '-framework' flag above already supplies it.
    VERSION="${PYTHON_VERSION}"
    STRING="${PYTHONFRAMEWORKDIR}/Versions/${VERSION}/${PYTHONFRAMEWORK}"
    LDFLAGS2=`${PYTHON} -c "from distutils import sysconfig; \
        print sysconfig.get_config_var(\"LINKFORSHARED\").replace( \
        \"${STRING}\", '')"`

    LDFLAGS="${LDFLAGS1} ${LDFLAGS2}"

    LDLIBS=`${PYTHON} -c 'from distutils import sysconfig; \
        print sysconfig.get_config_var("LIBS")'`
fi

AC_SUBST(LDFLAGS)
AC_SUBST(LDLIBS)

dnl Ask the httpd binary for its version so the matching makefile
dnl template (Makefile-1.X.in or Makefile-2.X.in) can be selected.
AC_MSG_CHECKING(Apache version)
HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`"
HTTPD_VERSION=`$HTTPD -v | awk '/version/ {print $3}' | awk -F/ '{print $2}'`
AC_MSG_RESULT($HTTPD_VERSION)

HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'`

rm -f Makefile.in
ln -s Makefile-${HTTPD_MAJOR_VERSION}.X.in Makefile.in

AC_OUTPUT(Makefile)
diff --git a/mod_wsgi.c b/mod_wsgi.c
new file mode 100644
index 0000000..6f934f4
--- /dev/null
+++ b/mod_wsgi.c
@@ -0,0 +1,6893 @@
+/* vim: set sw=4 expandtab : */
+
+/*
+ * Copyright 2007 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
/*
 * Enable access to the Apache private API and data structures. This is
 * needed to access the following:
 *
 * In Apache 1.3 it is not possible to access ap_check_cmd_context(),
 * whereas this was made public in Apache 2.0.
 *
 */
+
#define CORE_PRIVATE 1

#include "httpd.h"

/* Older httpd headers do not define AP_SERVER_MAJORVERSION_NUMBER;
 * derive it from the module magic number instead. */
#if !defined(AP_SERVER_MAJORVERSION_NUMBER)
#if AP_MODULE_MAGIC_AT_LEAST(20010224,0)
#define AP_SERVER_MAJORVERSION_NUMBER 2
#else
#define AP_SERVER_MAJORVERSION_NUMBER 1
#endif
#endif

#if !defined(AP_SERVER_BASEVERSION)
#define AP_SERVER_BASEVERSION SERVER_BASEVERSION
#endif

/* Apache 1.3 predates APR. Map the APR style type and function names
 * used throughout this module onto their Apache 1.3 equivalents so the
 * bulk of the code can be written against the APR names only. */
#if AP_SERVER_MAJORVERSION_NUMBER < 2
typedef int apr_status_t;
#define APR_SUCCESS 0
typedef pool apr_pool_t;
typedef unsigned int apr_port_t;
#include "ap_alloc.h"
#define apr_table_make ap_make_table
#define apr_table_get ap_table_get
#define apr_table_set ap_table_set
#define apr_table_setn ap_table_setn
#define apr_table_elts ap_table_elts
#define apr_array_make ap_make_array
#define apr_array_push ap_push_array
#define apr_array_cat ap_array_cat
#define apr_array_append ap_append_arrays
typedef array_header apr_array_header_t;
typedef table apr_table_t;
typedef table_entry apr_table_entry_t;
typedef int apr_size_t;
#define apr_psprintf ap_psprintf
#define apr_pstrndup ap_pstrndup
#define apr_pstrdup ap_pstrdup
#define apr_pstrcat ap_pstrcat
#define apr_pcalloc ap_pcalloc
typedef time_t apr_time_t;
#include "http_config.h"
typedef int apr_lockmech_e;
#else
#include "ap_mpm.h"
#include "ap_compat.h"
#include "apr_tables.h"
#include "apr_strings.h"
#include "http_config.h"
#include "ap_listen.h"
#include "apr_version.h"
#endif
+
+#include "ap_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_script.h"
+#include "util_md5.h"
+
+#if !AP_MODULE_MAGIC_AT_LEAST(20050127,0)
/* Debian backported ap_regex_t to Apache 2.0, which breaks
 * detection based purely on the official version check. */
+#ifndef AP_REG_EXTENDED
+typedef regex_t ap_regex_t;
+typedef regmatch_t ap_regmatch_t;
+#define AP_REG_EXTENDED REG_EXTENDED
+#endif
+#endif
+
+#include "Python.h"
+#include "compile.h"
+#include "node.h"
+
+#if !defined(PY_VERSION_HEX) || PY_VERSION_HEX <= 0x02030000
+#error Sorry, mod_wsgi requires at least Python 2.3.0.
+#endif
+
+#if !defined(WITH_THREAD)
#error Sorry, mod_wsgi requires that Python be built with thread support.
+#endif
+
+#ifndef WIN32
+#if AP_SERVER_MAJORVERSION_NUMBER >= 2
+#if APR_HAS_OTHER_CHILD && APR_HAS_THREADS && APR_HAS_FORK
+#define MOD_WSGI_WITH_DAEMONS 1
+#endif
+#endif
+#endif
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+
+#if !AP_MODULE_MAGIC_AT_LEAST(20051115,0)
+static void ap_close_listeners(void)
+{
+ ap_listen_rec *lr;
+
+ for (lr = ap_listeners; lr; lr = lr->next) {
+ apr_socket_close(lr->sd);
+ lr->active = 0;
+ }
+}
+#endif
+
+#if (APR_MAJOR_VERSION == 0) && \
+ (APR_MINOR_VERSION == 9) && \
+ (APR_PATCH_VERSION < 5)
+static apr_status_t apr_unix_file_cleanup(void *thefile)
+{
+ apr_file_t *file = thefile;
+
+ return apr_file_close(file);
+}
+
+static apr_status_t apr_os_pipe_put_ex(apr_file_t **file,
+ apr_os_file_t *thefile,
+ int register_cleanup,
+ apr_pool_t *pool)
+{
+ apr_status_t rv;
+
+ rv = apr_os_pipe_put(file, thefile, pool);
+
+ if (register_cleanup) {
+ apr_pool_cleanup_register(pool, (void *)(*file),
+ apr_unix_file_cleanup,
+ apr_pool_cleanup_null);
+ }
+
+ return rv;
+}
+#endif
+
+#endif
+
/* Compatibility macros for log level and status. In Apache 2 the
 * ap_log_*() functions take a separate status argument after the log
 * level, while Apache 1.3 folds everything into the level, so these
 * macros expand to either one or two arguments accordingly. */

#if AP_SERVER_MAJORVERSION_NUMBER < 2
#define WSGI_LOG_LEVEL(l) l
#define WSGI_LOG_LEVEL_AND_STATUS(l, e) l | (!e ? APLOG_NOERRNO : 0)
#else
#define WSGI_LOG_LEVEL(l) l, 0
#define WSGI_LOG_LEVEL_AND_STATUS(l, e) l, e
#endif

/* Convenience wrappers, one per log severity; 'e' is the error status. */
#define WSGI_LOG_EMERG(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_EMERG, e)
#define WSGI_LOG_ALERT(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_ALERT, e)
#define WSGI_LOG_CRIT(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_CRIT, e)
#define WSGI_LOG_ERR(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_ERR, e)
#define WSGI_LOG_WARNING(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_WARNING, e)
#define WSGI_LOG_NOTICE(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_NOTICE, e)
#define WSGI_LOG_INFO(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_INFO, e)
#define WSGI_LOG_DEBUG(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_DEBUG, e)

/* Version and module information. */

#define MOD_WSGI_MAJORVERSION_NUMBER 1
#define MOD_WSGI_MINORVERSION_NUMBER 0

/* Forward declaration of the module structure (defined later in the
 * file), using the export decoration appropriate to the httpd version. */
#if AP_SERVER_MAJORVERSION_NUMBER < 2
module MODULE_VAR_EXPORT wsgi_module;
#else
module AP_MODULE_DECLARE_DATA wsgi_module;
#endif

/* Base server object. Used as the logging target when no request
 * context is available (see Log_dealloc()/Log_output() below). */

static server_rec *wsgi_server = NULL;

/* Process information. */

static pid_t wsgi_parent_pid = 0;
static int wsgi_multiprocess = 1;
static int wsgi_multithread = 1;

/* Daemon process list. */

apr_array_header_t *wsgi_daemon_list = NULL;
+
+/* Configuration objects. */
+
/* One WSGI script alias mapping: a URL prefix (or regex) and the
 * application script it maps to. */
typedef struct {
    const char *location;       /* URL path prefix to match. */
    const char *application;    /* Target WSGI script path. */
    ap_regex_t *regexp;         /* Compiled pattern; NULL for plain prefix. */
} WSGIAliasEntry;

/*
 * Per-virtual-host server configuration. Pointer members left NULL and
 * integer members left -1 mean "not set": merging falls back to the
 * parent value and wsgi_create_req_config() applies the hard default.
 */
typedef struct {
    apr_pool_t *pool;           /* Pool this configuration lives in. */

    apr_array_header_t *alias_list;   /* Array of WSGIAliasEntry. */

    const char *socket_prefix;  /* Daemon socket path prefix; see
                                 * newWSGIServerConfig(). */
    apr_lockmech_e lock_mechanism;

    int python_optimize;
    const char *python_executable;
    const char *python_home;
    const char *python_path;

    int restrict_stdin;
    int restrict_stdout;
    int restrict_signal;

    apr_table_t *restrict_process;   /* Allowed process groups, if set. */

    const char *process_group;       /* Resolved via wsgi_process_group(). */
    const char *application_group;   /* Resolved via wsgi_application_group(). */
    const char *callable_object;     /* Resolved via wsgi_callable_object(). */

    int pass_authorization;
    int script_reloading;
    int reload_mechanism;
    int output_buffering;
    int case_sensitivity;
} WSGIServerConfig;

static WSGIServerConfig *wsgi_server_config = NULL;
+
+static WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p)
+{
+ WSGIServerConfig *object = NULL;
+
+ object = (WSGIServerConfig *)apr_pcalloc(p, sizeof(WSGIServerConfig));
+
+ object->pool = p;
+
+ object->alias_list = NULL;
+
+ object->socket_prefix = NULL;
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+ object->socket_prefix = DEFAULT_REL_RUNTIMEDIR "/wsgi";
+ object->socket_prefix = apr_psprintf(p, "%s.%d", object->socket_prefix,
+ getpid());
+ object->socket_prefix = ap_server_root_relative(p, object->socket_prefix);
+#endif
+
+ object->python_optimize = -1;
+ object->python_executable = NULL;
+ object->python_home = NULL;
+ object->python_path = NULL;
+
+ object->restrict_stdin = -1;
+ object->restrict_stdout = -1;
+ object->restrict_signal = -1;
+
+ object->restrict_process = NULL;
+
+ object->process_group = NULL;
+ object->application_group = NULL;
+ object->callable_object = NULL;
+
+ object->pass_authorization = -1;
+ object->script_reloading = -1;
+ object->reload_mechanism = -1;
+ object->output_buffering = -1;
+ object->case_sensitivity = -1;
+
+ return object;
+}
+
+static void *wsgi_create_server_config(apr_pool_t *p, server_rec *s)
+{
+ WSGIServerConfig *config = NULL;
+
+ config = newWSGIServerConfig(p);
+
+ return config;
+}
+
+static void *wsgi_merge_server_config(apr_pool_t *p, void *base_conf,
+ void *new_conf)
+{
+ WSGIServerConfig *config = NULL;
+ WSGIServerConfig *parent = NULL;
+ WSGIServerConfig *child = NULL;
+
+ config = newWSGIServerConfig(p);
+
+ parent = (WSGIServerConfig *)base_conf;
+ child = (WSGIServerConfig *)new_conf;
+
+ if (child->alias_list && parent->alias_list) {
+ config->alias_list = apr_array_append(p, child->alias_list,
+ parent->alias_list);
+ }
+ else if (child->alias_list) {
+ config->alias_list = apr_array_make(p, 20, sizeof(WSGIAliasEntry));
+ apr_array_cat(config->alias_list, child->alias_list);
+ }
+ else if (parent->alias_list) {
+ config->alias_list = apr_array_make(p, 20, sizeof(WSGIAliasEntry));
+ apr_array_cat(config->alias_list, parent->alias_list);
+ }
+
+ if (child->restrict_process)
+ config->restrict_process = child->restrict_process;
+ else
+ config->restrict_process = parent->restrict_process;
+
+ if (child->process_group)
+ config->process_group = child->process_group;
+ else
+ config->process_group = parent->process_group;
+
+ if (child->application_group)
+ config->application_group = child->application_group;
+ else
+ config->application_group = parent->application_group;
+
+ if (child->callable_object)
+ config->callable_object = child->callable_object;
+ else
+ config->callable_object = parent->callable_object;
+
+ if (child->pass_authorization != -1)
+ config->pass_authorization = child->pass_authorization;
+ else
+ config->pass_authorization = parent->pass_authorization;
+
+ if (child->script_reloading != -1)
+ config->script_reloading = child->script_reloading;
+ else
+ config->script_reloading = parent->script_reloading;
+
+ if (child->reload_mechanism != -1)
+ config->reload_mechanism = child->reload_mechanism;
+ else
+ config->reload_mechanism = parent->reload_mechanism;
+
+ if (child->output_buffering != -1)
+ config->output_buffering = child->output_buffering;
+ else
+ config->output_buffering = parent->output_buffering;
+
+ if (child->case_sensitivity != -1)
+ config->case_sensitivity = child->case_sensitivity;
+ else
+ config->case_sensitivity = parent->case_sensitivity;
+
+ return config;
+}
+
/*
 * Per-directory configuration. The same NULL / -1 "unset" convention
 * as WSGIServerConfig applies; unresolved members fall through to the
 * server level configuration in wsgi_create_req_config().
 */
typedef struct {
    apr_pool_t *pool;           /* Pool this configuration lives in. */

    apr_table_t *restrict_process;   /* Allowed process groups, if set. */

    const char *process_group;       /* Resolved via wsgi_process_group(). */
    const char *application_group;   /* Resolved via wsgi_application_group(). */
    const char *callable_object;     /* Resolved via wsgi_callable_object(). */

    int pass_authorization;
    int script_reloading;
    int reload_mechanism;
    int output_buffering;
    int case_sensitivity;
} WSGIDirectoryConfig;
+
+static WSGIDirectoryConfig *newWSGIDirectoryConfig(apr_pool_t *p)
+{
+ WSGIDirectoryConfig *object = NULL;
+
+ object = (WSGIDirectoryConfig *)apr_pcalloc(p, sizeof(WSGIDirectoryConfig));
+
+ object->pool = p;
+
+ object->process_group = NULL;
+ object->application_group = NULL;
+ object->callable_object = NULL;
+
+ object->pass_authorization = -1;
+ object->script_reloading = -1;
+ object->reload_mechanism = -1;
+ object->output_buffering = -1;
+ object->case_sensitivity = -1;
+
+ return object;
+}
+
+static void *wsgi_create_dir_config(apr_pool_t *p, char *dir)
+{
+ WSGIDirectoryConfig *config = NULL;
+
+ config = newWSGIDirectoryConfig(p);
+
+ return config;
+}
+
+static void *wsgi_merge_dir_config(apr_pool_t *p, void *base_conf,
+ void *new_conf)
+{
+ WSGIDirectoryConfig *config = NULL;
+ WSGIDirectoryConfig *parent = NULL;
+ WSGIDirectoryConfig *child = NULL;
+
+ config = newWSGIDirectoryConfig(p);
+
+ parent = (WSGIDirectoryConfig *)base_conf;
+ child = (WSGIDirectoryConfig *)new_conf;
+
+ if (child->restrict_process)
+ config->restrict_process = child->restrict_process;
+ else
+ config->restrict_process = parent->restrict_process;
+
+ if (child->process_group)
+ config->process_group = child->process_group;
+ else
+ config->process_group = parent->process_group;
+
+ if (child->application_group)
+ config->application_group = child->application_group;
+ else
+ config->application_group = parent->application_group;
+
+ if (child->callable_object)
+ config->callable_object = child->callable_object;
+ else
+ config->callable_object = parent->callable_object;
+
+ if (child->pass_authorization != -1)
+ config->pass_authorization = child->pass_authorization;
+ else
+ config->pass_authorization = parent->pass_authorization;
+
+ if (child->script_reloading != -1)
+ config->script_reloading = child->script_reloading;
+ else
+ config->script_reloading = parent->script_reloading;
+
+ if (child->reload_mechanism != -1)
+ config->reload_mechanism = child->reload_mechanism;
+ else
+ config->reload_mechanism = parent->reload_mechanism;
+
+ if (child->output_buffering != -1)
+ config->output_buffering = child->output_buffering;
+ else
+ config->output_buffering = parent->output_buffering;
+
+ if (child->case_sensitivity != -1)
+ config->case_sensitivity = child->case_sensitivity;
+ else
+ config->case_sensitivity = parent->case_sensitivity;
+
+ return config;
+}
+
/*
 * Fully resolved per-request configuration: directory settings merged
 * over server settings with hard defaults applied, and all '%' value
 * substitutions expanded. Built by wsgi_create_req_config().
 */
typedef struct {
    apr_pool_t *pool;           /* Pool this configuration lives in. */

    apr_table_t *restrict_process;   /* Allowed process groups, if set. */

    const char *process_group;       /* Fully expanded; "" means global. */
    const char *application_group;   /* Fully expanded; "" means global. */
    const char *callable_object;     /* Entry point name, e.g. "application". */

    int pass_authorization;     /* 0/1 after defaulting. */
    int script_reloading;       /* 0/1 after defaulting. */
    int reload_mechanism;       /* 0/1 after defaulting. */
    int output_buffering;       /* 0/1 after defaulting. */
    int case_sensitivity;       /* 0/1 after defaulting; platform default. */
} WSGIRequestConfig;
+
/*
 * Returns the SCRIPT_NAME for the request: r->uri with any trailing
 * path info removed and duplicate slashes collapsed.
 *
 * NOTE(review): the result is always lower-cased via ap_str_tolower(),
 * independent of the case_sensitivity option -- confirm intended.
 */
static const char *wsgi_script_name(request_rec *r)
{
    char *script_name = NULL;
    int path_info_start = 0;

    if (!r->path_info || !*r->path_info) {
        /* No path info: the whole URI is the script name. */
        script_name = apr_pstrdup(r->pool, r->uri);
    }
    else {
        /* Cut the URI at the point where the path info begins. */
        path_info_start = ap_find_path_info(r->uri, r->path_info);

        script_name = apr_pstrndup(r->pool, r->uri, path_info_start);
    }

    if (*script_name) {
        /* Advance over all but the last of any leading run of slashes,
         * then collapse remaining duplicate slashes in a fresh copy. */
        while (*script_name && (*(script_name+1) == '/'))
            script_name++;
        script_name = apr_pstrdup(r->pool, script_name);
        ap_no2slash((char*)script_name);
    }

    ap_str_tolower(script_name);

    return script_name;
}
+
/*
 * Expands a process group directive value for the request.
 *
 * NULL yields "" (the global group). Plain strings are returned as
 * is. Values starting with '%' are special: "%{GLOBAL}" maps to "",
 * and "%{ENV:NAME}" is looked up in r->notes, then r->subprocess_env,
 * then the real environment. A value obtained that way may itself
 * start with '%' and is expanded recursively -- but never into a
 * further %{ENV:...}, which prevents unbounded recursion. Any other
 * '%' form falls through and is returned verbatim.
 */
static const char *wsgi_process_group(request_rec *r, const char *s)
{
    const char *name = NULL;
    const char *value = NULL;

    if (!s)
        return "";

    if (*s != '%')
        return s;

    name = s + 1;

    if (*name) {
        if (!strcmp(name, "{GLOBAL}"))
            return "";

        if (strstr(name, "{ENV:") == name) {
            int len = 0;

            /* Step past "{ENV:" and check for the closing brace. */
            name = name + 5;
            len = strlen(name);

            if (len && name[len-1] == '}') {
                name = apr_pstrndup(r->pool, name, len-1);

                value = apr_table_get(r->notes, name);

                if (!value)
                    value = apr_table_get(r->subprocess_env, name);

                if (!value)
                    value = getenv(name);

                if (value) {
                    /* Recurse for nested '%' values, except another
                     * %{ENV:...} which would risk an expansion loop. */
                    if (*value == '%' && strstr(value, "%{ENV:") != value)
                        return wsgi_process_group(r, value);

                    return value;
                }
            }
        }
    }

    return s;
}
+
/*
 * Expands an application group directive value for the request.
 *
 * NULL behaves like "%{RESOURCE}": host[:port]|script_name, with the
 * port omitted when it is the default HTTP or HTTPS port. Plain
 * strings are returned as is. Recognised '%' forms: "%{RESOURCE}",
 * "%{SERVER}" (host[:port] only), "%{GLOBAL}" (empty string), and
 * "%{ENV:NAME}" looked up in r->notes, then r->subprocess_env, then
 * the real environment, with guarded recursive expansion as in
 * wsgi_process_group(). Unrecognised '%' forms are returned verbatim.
 */
static const char *wsgi_application_group(request_rec *r, const char *s)
{
    const char *name = NULL;
    const char *value = NULL;

    const char *h = NULL;
    apr_port_t p = 0;
    const char *n = NULL;

    if (!s) {
        /* No directive value: default to the %{RESOURCE} form. */
        h = r->server->server_hostname;
        p = ap_get_server_port(r);
        n = wsgi_script_name(r);

        if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT)
            return apr_psprintf(r->pool, "%s:%u|%s", h, p, n);
        else
            return apr_psprintf(r->pool, "%s|%s", h, n);
    }

    if (*s != '%')
        return s;

    name = s + 1;

    if (*name) {
        if (!strcmp(name, "{RESOURCE}")) {
            h = r->server->server_hostname;
            p = ap_get_server_port(r);
            n = wsgi_script_name(r);

            if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT)
                return apr_psprintf(r->pool, "%s:%u|%s", h, p, n);
            else
                return apr_psprintf(r->pool, "%s|%s", h, n);
        }

        if (!strcmp(name, "{SERVER}")) {
            h = r->server->server_hostname;
            p = ap_get_server_port(r);

            if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT)
                return apr_psprintf(r->pool, "%s:%u", h, p);
            else
                return h;
        }

        if (!strcmp(name, "{GLOBAL}"))
            return "";

        if (strstr(name, "{ENV:") == name) {
            int len = 0;

            /* Step past "{ENV:" and check for the closing brace. */
            name = name + 5;
            len = strlen(name);

            if (len && name[len-1] == '}') {
                name = apr_pstrndup(r->pool, name, len-1);

                value = apr_table_get(r->notes, name);

                if (!value)
                    value = apr_table_get(r->subprocess_env, name);

                if (!value)
                    value = getenv(name);

                if (value) {
                    /* Recurse for nested '%' values, except another
                     * %{ENV:...} which would risk an expansion loop. */
                    if (*value == '%' && strstr(value, "%{ENV:") != value)
                        return wsgi_application_group(r, value);

                    return value;
                }
            }
        }
    }

    return s;
}
+
+static const char *wsgi_callable_object(request_rec *r, const char *s)
+{
+ const char *name = NULL;
+ const char *value = NULL;
+
+ if (!s)
+ return "application";
+
+ if (*s != '%')
+ return s;
+
+ name = s + 1;
+
+ if (!*name)
+ return "application";
+
+ if (strstr(name, "{ENV:") == name) {
+ int len = 0;
+
+ name = name + 5;
+ len = strlen(name);
+
+ if (len && name[len-1] == '}') {
+ name = apr_pstrndup(r->pool, name, len-1);
+
+ value = apr_table_get(r->notes, name);
+
+ if (!value)
+ value = apr_table_get(r->subprocess_env, name);
+
+ if (!value)
+ value = getenv(name);
+
+ if (value)
+ return value;
+ }
+ }
+
+ return "application";
+}
+
/*
 * Builds the effective per-request configuration. For every option the
 * directory level value is preferred, falling back to the server level
 * value, and finally to a hard default; the group/callable strings are
 * then run through their '%' expansion helpers.
 */
static WSGIRequestConfig *wsgi_create_req_config(apr_pool_t *p, request_rec *r)
{
    WSGIRequestConfig *config = NULL;
    WSGIServerConfig *sconfig = NULL;
    WSGIDirectoryConfig *dconfig = NULL;

    config = (WSGIRequestConfig *)apr_pcalloc(p, sizeof(WSGIRequestConfig));

    dconfig = ap_get_module_config(r->per_dir_config, &wsgi_module);
    sconfig = ap_get_module_config(r->server->module_config, &wsgi_module);

    config->pool = p;

    config->restrict_process = dconfig->restrict_process;

    if (!config->restrict_process)
        config->restrict_process = sconfig->restrict_process;

    config->process_group = dconfig->process_group;

    if (!config->process_group)
        config->process_group = sconfig->process_group;

    /* Expand %{GLOBAL} / %{ENV:...} forms (NULL becomes ""). */
    config->process_group = wsgi_process_group(r, config->process_group);

    config->application_group = dconfig->application_group;

    if (!config->application_group)
        config->application_group = sconfig->application_group;

    /* Expand %{RESOURCE}/%{SERVER}/%{GLOBAL}/%{ENV:...} forms. */
    config->application_group = wsgi_application_group(r,
                                    config->application_group);

    config->callable_object = dconfig->callable_object;

    if (!config->callable_object)
        config->callable_object = sconfig->callable_object;

    /* Expand %{ENV:...}; NULL becomes "application". */
    config->callable_object = wsgi_callable_object(r, config->callable_object);

    config->pass_authorization = dconfig->pass_authorization;

    if (config->pass_authorization < 0) {
        config->pass_authorization = sconfig->pass_authorization;
        if (config->pass_authorization < 0)
            config->pass_authorization = 0;    /* Default: off. */
    }

    config->script_reloading = dconfig->script_reloading;

    if (config->script_reloading < 0) {
        config->script_reloading = sconfig->script_reloading;
        if (config->script_reloading < 0)
            config->script_reloading = 1;      /* Default: on. */
    }

    config->reload_mechanism = dconfig->reload_mechanism;

    if (config->reload_mechanism < 0) {
        config->reload_mechanism = sconfig->reload_mechanism;
        if (config->reload_mechanism < 0)
            config->reload_mechanism = 0;      /* Default: module reload. */
    }

    config->output_buffering = dconfig->output_buffering;

    if (config->output_buffering < 0) {
        config->output_buffering = sconfig->output_buffering;
        if (config->output_buffering < 0)
            config->output_buffering = 0;      /* Default: off. */
    }

    config->case_sensitivity = dconfig->case_sensitivity;

    if (config->case_sensitivity < 0) {
        config->case_sensitivity = sconfig->case_sensitivity;
        if (config->case_sensitivity < 0) {
            /* Default tracks the platform's filesystem convention. */
#if defined(WIN32) || defined(DARWIN)
            config->case_sensitivity = 0;
#else
            config->case_sensitivity = 1;
#endif
        }
    }

    return config;
}
+
+/* Class objects used by response handler. */
+
/* Python object wrapping the Apache error log as a file-like object. */
typedef struct {
    PyObject_HEAD
    request_rec *r;     /* Request to log against; NULL logs via wsgi_server. */
    int level;          /* Apache log level; APLOG_NOERRNO OR'ed in on create. */
    char *s;            /* malloc()ed buffer holding an incomplete line. */
    int expired;        /* Non-zero once invalidated; methods then raise. */
} LogObject;

static PyTypeObject Log_Type;
+
+static LogObject *newLogObject(request_rec *r, int level)
+{
+ LogObject *self;
+
+ self = PyObject_New(LogObject, &Log_Type);
+ if (self == NULL)
+ return NULL;
+
+ self->r = r;
+ self->level = APLOG_NOERRNO|level;
+ self->s = NULL;
+ self->expired = 0;
+
+ return self;
+}
+
/*
 * Destructor: flush any buffered partial line to the Apache error log
 * before releasing the object, so trailing output is not lost.
 */
static void Log_dealloc(LogObject *self)
{
    if (self->s) {
        if (self->r) {
            ap_log_rerror(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
                          self->r, "%s", self->s);
        }
        else {
            /* No request context: log against the global server. */
            ap_log_error(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
                         wsgi_server, "%s", self->s);
        }

        free(self->s);
    }

    PyObject_Del(self);
}
+
/*
 * log.flush() -> None
 *
 * Force any buffered partial line out to the Apache error log and
 * empty the buffer. Raises RuntimeError if the log object has expired.
 */
static PyObject *Log_flush(LogObject *self, PyObject *args)
{
    if (self->expired) {
        PyErr_SetString(PyExc_RuntimeError, "log object has expired");
        return NULL;
    }

    if (!PyArg_ParseTuple(args, ":flush"))
        return NULL;

    if (self->s) {
        if (self->r) {
            ap_log_rerror(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
                          self->r, "%s", self->s);
        }
        else {
            /* No request context: log against the global server. */
            ap_log_error(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
                         wsgi_server, "%s", self->s);
        }

        free(self->s);
        self->s = NULL;
    }

    Py_INCREF(Py_None);
    return Py_None;
}
+
+static void Log_output(LogObject *self, const char *msg)
+{
+ const char *p = NULL;
+ const char *q = NULL;
+
+ p = msg;
+
+ q = strchr(p, '\n');
+
+ while (q) {
+ /* Output each complete line. */
+
+ if (self->s) {
+ /* Need to join with buffered value. */
+
+ int m = 0;
+ int n = 0;
+ char *s = NULL;
+
+ m = strlen(self->s);
+ n = m+q-p+1;
+
+ s = (char *)malloc(n);
+ strncpy(s, self->s, m);
+ strncpy(s+m, p, q-p);
+ s[n-1] = '\0';
+
+ if (self->r) {
+ ap_log_rerror(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
+ self->r, "%s", s);
+ }
+ else {
+ ap_log_error(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
+ wsgi_server, "%s", s);
+ }
+
+ free(self->s);
+ self->s = NULL;
+
+ free(s);
+ }
+ else {
+ int n = 0;
+ char *s = NULL;
+
+ n = q-p+1;
+
+ s = (char *)malloc(n);
+ strncpy(s, p, q-p);
+ s[n-1] = '\0';
+
+ if (self->r) {
+ ap_log_rerror(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
+ self->r, "%s", s);
+ }
+ else {
+ ap_log_error(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
+ wsgi_server, "%s", s);
+ }
+
+ free(s);
+ }
+
+ p = q+1;
+ q = strchr(p, '\n');
+ }
+
+ if (*p) {
+ /* Save away incomplete line. */
+
+ if (self->s) {
+ /* Need to join with buffered value. */
+
+ int m = 0;
+ int n = 0;
+
+ m = strlen(self->s);
+ n = strlen(p);
+
+ self->s = (char *)realloc(self->s, m+n+1);
+ strncpy(self->s+m, p, n);
+ self->s[m+n] = '\0';
+ }
+ else {
+ self->s = (char *)malloc(strlen(p)+1);
+ strcpy(self->s, p);
+ }
+ }
+}
+
/*
 * log.write(msg) -> None
 *
 * Append 'msg' to the log: complete lines are emitted immediately and
 * any trailing partial line is buffered (see Log_output()). Raises
 * RuntimeError if the log object has expired.
 */
static PyObject *Log_write(LogObject *self, PyObject *args)
{
    const char *msg = NULL;

    if (self->expired) {
        PyErr_SetString(PyExc_RuntimeError, "log object has expired");
        return NULL;
    }

    if (!PyArg_ParseTuple(args, "s:write", &msg))
        return NULL;

    Log_output(self, msg);

    Py_INCREF(Py_None);
    return Py_None;
}
+
/*
 * log.writelines(sequence) -> None
 *
 * Write each string of an iterable to the log via Log_output().
 * Raises RuntimeError if the log has expired; iteration and
 * non-string errors from the items are propagated.
 */
static PyObject *Log_writelines(LogObject *self, PyObject *args)
{
    PyObject *sequence = NULL;
    PyObject *iterator = NULL;
    PyObject *item = NULL;
    const char *msg = NULL;

    if (self->expired) {
        PyErr_SetString(PyExc_RuntimeError, "log object has expired");
        return NULL;
    }

    if (!PyArg_ParseTuple(args, "O:writelines", &sequence))
        return NULL;

    iterator = PyObject_GetIter(sequence);

    if (iterator == NULL)
        return NULL;

    while ((item = PyIter_Next(iterator))) {
        /* Non-string items leave msg NULL with an exception set. */
        msg = PyString_AsString(item);

        if (msg) {
            Log_output(self, msg);

            Py_DECREF(item);
        }
        else {
            Py_DECREF(item);

            break;
        }
    }

    Py_DECREF(iterator);

    /* 'item' is only compared against NULL here (it has already been
     * released): non-NULL means the loop broke early on a non-string
     * item, so propagate the pending exception. */
    if (item && !msg)
        return NULL;

    Py_INCREF(Py_None);
    return Py_None;
}
+
/* Method table: the file-like interface exposed by log objects
 * (flush/write/writelines, as needed for sys.stderr style use). */
static PyMethodDef Log_methods[] = {
    { "flush", (PyCFunction)Log_flush, METH_VARARGS, 0},
    { "write", (PyCFunction)Log_write, METH_VARARGS, 0},
    { "writelines", (PyCFunction)Log_writelines, METH_VARARGS, 0},
    { NULL, NULL}
};
+
/* Python type object backing LogObject instances. */
static PyTypeObject Log_Type = {
    /* The ob_type field must be initialized in the module init function
     * to be portable to Windows without using C++. */
    PyObject_HEAD_INIT(NULL)
    0,                      /*ob_size*/
    "mod_wsgi.Log",         /*tp_name*/
    sizeof(LogObject),      /*tp_basicsize*/
    0,                      /*tp_itemsize*/
    /* methods */
    (destructor)Log_dealloc, /*tp_dealloc*/
    0,                      /*tp_print*/
    0,                      /*tp_getattr*/
    0,                      /*tp_setattr*/
    0,                      /*tp_compare*/
    0,                      /*tp_repr*/
    0,                      /*tp_as_number*/
    0,                      /*tp_as_sequence*/
    0,                      /*tp_as_mapping*/
    0,                      /*tp_hash*/
    0,                      /*tp_call*/
    0,                      /*tp_str*/
    0,                      /*tp_getattro*/
    0,                      /*tp_setattro*/
    0,                      /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,     /*tp_flags*/
    0,                      /*tp_doc*/
    0,                      /*tp_traverse*/
    0,                      /*tp_clear*/
    0,                      /*tp_richcompare*/
    0,                      /*tp_weaklistoffset*/
    0,                      /*tp_iter*/
    0,                      /*tp_iternext*/
    Log_methods,            /*tp_methods*/
    0,                      /*tp_members*/
    0,                      /*tp_getset*/
    0,                      /*tp_base*/
    0,                      /*tp_dict*/
    0,                      /*tp_descr_get*/
    0,                      /*tp_descr_set*/
    0,                      /*tp_dictoffset*/
    0,                      /*tp_init*/
    0,                      /*tp_alloc*/
    0,                      /*tp_new*/
    0,                      /*tp_free*/
    0,                      /*tp_is_gc*/
};
+
+void wsgi_log_python_error(request_rec *r, LogObject *log)
+{
+ if (!PyErr_Occurred())
+ return;
+
+ PyObject *m = NULL;
+ PyObject *result = NULL;
+
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ "mod_wsgi (pid=%d): SystemExit exception raised by "
+ "WSGI script '%s' ignored.", getpid(), r->filename);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ "mod_wsgi (pid=%d): Exception occurred within WSGI "
+ "script '%s'.", getpid(), r->filename);
+ }
+
+ PyErr_Fetch(&type, &value, &traceback);
+ PyErr_NormalizeException(&type, &value, &traceback);
+
+ if (!value) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+
+ if (!traceback) {
+ traceback = Py_None;
+ Py_INCREF(traceback);
+ }
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_exception");
+ if (o) {
+ PyObject *args = NULL;
+ Py_INCREF(o);
+ args = Py_BuildValue("(OOOOO)", type, value, traceback,
+ Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_DECREF(args);
+ }
+ Py_DECREF(o);
+ }
+
+ if (!result) {
+ /*
+ * If can't output exception and traceback then
+ * use PyErr_Print to dump out details of the
+ * exception. For SystemExit though if we do
+ * that the process will actually be terminated
+ * so can only clear the exception information
+ * and keep going.
+ */
+
+ PyErr_Restore(type, value, traceback);
+
+ if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ PyErr_Print();
+ if (Py_FlushLine())
+ PyErr_Clear();
+ }
+ else {
+ PyErr_Clear();
+ }
+ }
+ else {
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(traceback);
+ }
+
+ Py_XDECREF(result);
+
+ Py_XDECREF(m);
+}
+
/*
 * Object providing the WSGI 'wsgi.input' stream, wrapping the
 * reading of request content with file like read methods.
 */

typedef struct {
    PyObject_HEAD
    request_rec *r;     /* Request being read from; NULL once expired. */
    int init;           /* Whether reading of content has been set up. */
    int done;           /* Whether all content has been consumed. */
    char *buffer;       /* Residual data held over from readline(). */
    apr_size_t size;    /* Allocated size of residual buffer. */
    apr_size_t offset;  /* Offset of unconsumed data within buffer. */
    apr_size_t length;  /* Amount of unconsumed data in buffer. */
} InputObject;

static PyTypeObject Input_Type;
+
+static InputObject *newInputObject(request_rec *r)
+{
+ InputObject *self;
+
+ self = PyObject_New(InputObject, &Input_Type);
+ if (self == NULL)
+ return NULL;
+
+ self->r = r;
+ self->init = 0;
+ self->done = 0;
+
+ self->buffer = NULL;
+ self->size = 0;
+ self->offset = 0;
+ self->length = 0;
+
+ return self;
+}
+
+static void Input_dealloc(InputObject *self)
+{
+ if (self->buffer)
+ free(self->buffer);
+
+ PyObject_Del(self);
+}
+
+static PyObject *Input_close(InputObject *self, PyObject *args)
+{
+ if (!self->r) {
+ PyErr_SetString(PyExc_RuntimeError, "request object has expired");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, ":close"))
+ return NULL;
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
/*
 * Implements the read() method of the WSGI input stream. With a
 * positive size, blocks until that many bytes have been
 * accumulated or the request content is exhausted. When no size
 * (or a negative size) is given, at most one block of up to
 * HUGE_STRING_LEN bytes is read, without blocking to accumulate
 * more. Returns an empty string once all content is consumed.
 */

static PyObject *Input_read(InputObject *self, PyObject *args)
{
    long size = -1;
    int blocking = 1;

    PyObject *result = NULL;
    char *buffer = NULL;
    apr_size_t length = 0;

    apr_size_t n;

    if (!self->r) {
        PyErr_SetString(PyExc_RuntimeError, "request object has expired");
        return NULL;
    }

    if (!PyArg_ParseTuple(args, "|l:read", &size))
        return NULL;

    /*
     * On first use determine whether there is actually any
     * request content available to be read at all.
     */

    if (!self->init) {
        if (!ap_should_client_block(self->r))
            self->done = 1;

        self->init = 1;
    }

    /*
     * No point continuing if requested size is zero or if no
     * more data to read and no buffered data.
     */

    if ((self->done && self->length == 0) || size == 0)
        return PyString_FromString("");

    /*
     * If size is not specified for the number of bytes to read
     * in, default to reading in standard Apache block size.
     * Denote that blocking until we accumulate data of
     * specified size is disabled in this case.
     */

    if (size < 0) {
        size = HUGE_STRING_LEN;
        blocking = 0;
    }

    /* Allocate string of the exact size required. */

    result = PyString_FromStringAndSize(NULL, size);

    if (!result)
        return NULL;

    buffer = PyString_AS_STRING((PyStringObject *)result);

    /* Copy any residual data from use of readline(). */

    if (self->buffer && self->length) {
        if (size >= self->length) {
            length = self->length;
            memcpy(buffer, self->buffer + self->offset, length);
            self->offset = 0;
            self->length = 0;
        }
        else {
            length = size;
            memcpy(buffer, self->buffer + self->offset, length);
            self->offset += length;
            self->length -= length;
        }
    }

    /* If all data residual buffer consumed then free it. */

    if (!self->length) {
        free(self->buffer);
        self->buffer = NULL;
    }

    /*
     * If not required to block and we already have some data
     * from residual buffer we can return immediately.
     */

    if (!blocking && length != 0) {
        if (length != size) {
            if (_PyString_Resize(&result, length))
                return NULL;
        }

        return result;
    }

    /*
     * Read in remaining data required to achieve size. If
     * requested size of data wasn't able to be read in just
     * return what was able to be read if blocking not required.
     */

    if (length < size) {
        while (length != size) {
            Py_BEGIN_ALLOW_THREADS
            n = ap_get_client_block(self->r, buffer + length, size - length);
            Py_END_ALLOW_THREADS

            /*
             * NOTE(review): 'n' is unsigned, so this comparison
             * against -1 relies on the error result from
             * ap_get_client_block() converting to the maximum
             * unsigned value.
             */

            if (n == -1) {
                PyErr_SetString(PyExc_IOError, "request data read error");
                Py_DECREF(result);
                return NULL;
            }
            else if (n == 0) {
                /* Have exhausted all the available input data. */

                self->done = 1;
                break;
            }

            length += n;

            /* Don't read more if not required to block. */

            if (!blocking)
                break;
        }

        /*
         * Resize the final string. If the size reduction is
         * by more than 25% of the string size, then Python
         * will allocate a new block of memory and copy the
         * data into it.
         */

        if (length != size) {
            if (_PyString_Resize(&result, length))
                return NULL;
        }
    }

    return result;
}
+
/*
 * Implements the readline() method of the WSGI input stream.
 * With a positive size the result is capped at that many bytes
 * even when no EOL has been seen. With no size (or a negative
 * size) a complete line is returned no matter how long, the
 * result string being grown as required. Data read beyond the
 * EOL is retained in the residual buffer for use by subsequent
 * read operations. Returns an empty string once all request
 * content has been consumed.
 */

static PyObject *Input_readline(InputObject *self, PyObject *args)
{
    long size = -1;

    PyObject *result = NULL;
    char *buffer = NULL;
    apr_size_t length = 0;

    apr_size_t n;

    if (!self->r) {
        PyErr_SetString(PyExc_RuntimeError, "request object has expired");
        return NULL;
    }

    if (!PyArg_ParseTuple(args, "|l:readline", &size))
        return NULL;

    /*
     * On first use determine whether there is actually any
     * request content available to be read at all.
     */

    if (!self->init) {
        if (!ap_should_client_block(self->r))
            self->done = 1;

        self->init = 1;
    }

    /*
     * No point continuing if requested size is zero or if no
     * more data to read and no buffered data.
     */

    if ((self->done && self->length == 0) || size == 0)
        return PyString_FromString("");

    /*
     * First deal with case where size has been specified. After
     * that deal with case where expected that a complete line
     * is returned regardless of the size.
     */

    if (size > 0) {
        /* Allocate string of the exact size required. */

        result = PyString_FromStringAndSize(NULL, size);

        if (!result)
            return NULL;

        buffer = PyString_AS_STRING((PyStringObject *)result);

        /* Copy any residual data from use of readline(). */

        if (self->buffer && self->length) {
            char *p = NULL;
            const char *q = NULL;

            p = buffer;
            q = self->buffer + self->offset;

            /* Copy byte at a time, stopping at any EOL found. */

            while (self->length && length < size) {
                self->offset++;
                self->length--;
                length++;
                if ((*p++ = *q++) == '\n')
                    break;
            }

            /* If all data in residual buffer consumed then free it. */

            if (!self->length) {
                free(self->buffer);
                self->buffer = NULL;
            }
        }

        /*
         * Read in remaining data required to achieve size. Note
         * that can't just return whatever the first read might
         * have returned if no EOL encountered as must return
         * exactly the required size if no EOL unless that would
         * have exhausted all input.
         */

        while ((!length || buffer[length-1] != '\n') &&
               !self->done && length < size) {

            char *p = NULL;
            char *q = NULL;

            Py_BEGIN_ALLOW_THREADS
            n = ap_get_client_block(self->r, buffer + length, size - length);
            Py_END_ALLOW_THREADS

            /*
             * NOTE(review): 'n' is unsigned, so this comparison
             * against -1 relies on the error result from
             * ap_get_client_block() converting to the maximum
             * unsigned value.
             */

            if (n == -1) {
                PyErr_SetString(PyExc_IOError, "request data read error");
                Py_DECREF(result);
                return NULL;
            }
            else if (n == 0) {
                /* Have exhausted all the available input data. */

                self->done = 1;
            }
            else {
                /*
                 * Search for embedded EOL in what was read and if
                 * found copy any residual into a buffer for use
                 * next time the read functions are called.
                 */

                p = buffer + length;
                q = p + n;

                while (p != q) {
                    length++;
                    if (*p++ == '\n')
                        break;
                }

                if (p != q) {
                    self->size = q - p;
                    self->buffer = (char *)malloc(self->size);
                    self->offset = 0;
                    self->length = self->size;

                    memcpy(self->buffer, p, self->size);
                }
            }
        }

        /*
         * Resize the final string. If the size reduction is
         * by more than 25% of the string size, then Python
         * will allocate a new block of memory and copy the
         * data into it.
         */

        if (length != size) {
            if (_PyString_Resize(&result, length))
                return NULL;
        }
    }
    else {
        /*
         * Here we have to read in a line but where we have no
         * idea how long it may be. What we can do first is if
         * we have any residual data from a previous read
         * operation, see if it contains an EOL. This means we
         * have to do a search, but this is likely going to be
         * better than having to resize and copy memory later on.
         */

        if (self->buffer && self->length) {
            const char *p = NULL;
            const char *q = NULL;

            p = self->buffer + self->offset;
            q = memchr(p, '\n', self->length);

            if (q)
                size = q - p;
        }

        /*
         * If residual data buffer didn't contain an EOL, all we
         * can do is allocate a reasonably sized string and if
         * that isn't big enough keep increasing it in size. For
         * this we will start out with a buffer 25% greater in
         * size than what is stored in the residual data buffer
         * or one the same size as Apache string size, whichever
         * is greater.
         */

        if (self->buffer && size < 0) {
            size = self->length;
            size = size + (size >> 2);
        }

        if (size < HUGE_STRING_LEN)
            size = HUGE_STRING_LEN;

        /* Allocate string of the initial size. */

        result = PyString_FromStringAndSize(NULL, size);

        if (!result)
            return NULL;

        buffer = PyString_AS_STRING((PyStringObject *)result);

        /* Copy any residual data from use of readline(). */

        if (self->buffer && self->length) {
            char *p = NULL;
            const char *q = NULL;

            p = buffer;
            q = self->buffer + self->offset;

            /* Copy byte at a time, stopping at any EOL found. */

            while (self->length && length < size) {
                self->offset++;
                self->length--;
                length++;
                if ((*p++ = *q++) == '\n')
                    break;
            }

            /* If all data in residual buffer consumed then free it. */

            if (!self->length) {
                free(self->buffer);
                self->buffer = NULL;
            }
        }

        /*
         * Read in remaining data until find an EOL, or until all
         * data has been consumed.
         */

        while ((!length || buffer[length-1] != '\n') && !self->done) {

            char *p = NULL;
            char *q = NULL;

            Py_BEGIN_ALLOW_THREADS
            n = ap_get_client_block(self->r, buffer + length, size - length);
            Py_END_ALLOW_THREADS

            if (n == -1) {
                PyErr_SetString(PyExc_IOError, "request data read error");
                Py_DECREF(result);
                return NULL;
            }
            else if (n == 0) {
                /* Have exhausted all the available input data. */

                self->done = 1;
            }
            else {
                /*
                 * Search for embedded EOL in what was read and if
                 * found copy any residual into a buffer for use
                 * next time the read functions are called.
                 */

                p = buffer + length;
                q = p + n;

                while (p != q) {
                    length++;
                    if (*p++ == '\n')
                        break;
                }

                if (p != q) {
                    self->size = q - p;
                    self->buffer = (char *)malloc(self->size);
                    self->offset = 0;
                    self->length = self->size;

                    memcpy(self->buffer, p, self->size);
                }

                if (buffer[length-1] != '\n') {
                    /* Increase size of string and keep going. */

                    size = size + (size >> 2);

                    if (_PyString_Resize(&result, size))
                        return NULL;

                    buffer = PyString_AS_STRING((PyStringObject *)result);
                }
            }
        }

        /*
         * Resize the final string. If the size reduction is by
         * more than 25% of the string size, then Python will
         * allocate a new block of memory and copy the data into
         * it.
         */

        if (length != size) {
            if (_PyString_Resize(&result, length))
                return NULL;
        }
    }

    return result;
}
+
+static PyObject *Input_readlines(InputObject *self, PyObject *args)
+{
+ long hint = 0;
+ long length = 0;
+
+ PyObject *result = NULL;
+ PyObject *line = NULL;
+ PyObject *rlargs = NULL;
+
+ if (!self->r) {
+ PyErr_SetString(PyExc_RuntimeError, "request object has expired");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, "|l:readlines", &hint))
+ return NULL;
+
+ result = PyList_New(0);
+ if (!result)
+ return NULL;
+
+ rlargs = PyTuple_New(0);
+ if (!rlargs) {
+ Py_DECREF(result);
+ return NULL;
+ }
+
+ while (1) {
+ int n;
+
+ if (!(line = Input_readline(self, rlargs))) {
+ Py_DECREF(result);
+ result = NULL;
+ break;
+ }
+
+ if ((n = PyString_Size(line)) == 0) {
+ Py_DECREF(line);
+ break;
+ }
+
+ if (PyList_Append(result, line) == -1) {
+ Py_DECREF(line);
+ Py_DECREF(result);
+ result = NULL;
+ break;
+ }
+
+ Py_DECREF(line);
+
+ length += n;
+ if (hint > 0 && length >= hint)
+ break;
+ }
+
+ Py_DECREF(rlargs);
+
+ return result;
+}
+
/* Method table for the input stream object. */

static PyMethodDef Input_methods[] = {
    { "close",     (PyCFunction)Input_close,     METH_VARARGS, 0},
    { "read",      (PyCFunction)Input_read,      METH_VARARGS, 0},
    { "readline",  (PyCFunction)Input_readline,  METH_VARARGS, 0},
    { "readlines", (PyCFunction)Input_readlines, METH_VARARGS, 0},
    { NULL, NULL}
};
+
+static PyObject *Input_iter(InputObject *self)
+{
+ if (!self->r) {
+ PyErr_SetString(PyExc_RuntimeError, "request object has expired");
+ return NULL;
+ }
+
+ Py_INCREF(self);
+ return (PyObject *)self;
+}
+
+static PyObject *Input_iternext(InputObject *self)
+{
+ PyObject *line = NULL;
+ PyObject *rlargs = NULL;
+
+ if (!self->r) {
+ PyErr_SetString(PyExc_RuntimeError, "request object has expired");
+ return NULL;
+ }
+
+ rlargs = PyTuple_New(0);
+
+ if (!rlargs)
+ return NULL;
+
+ line = Input_readline(self, rlargs);
+
+ Py_DECREF(rlargs);
+
+ if (!line)
+ return NULL;
+
+ if (PyString_GET_SIZE(line) == 0) {
+ PyErr_SetObject(PyExc_StopIteration, Py_None);
+ Py_DECREF(line);
+ return NULL;
+ }
+
+ return line;
+}
+
/*
 * Type object for the request content input stream, published
 * to WSGI applications as 'wsgi.input'.
 */

static PyTypeObject Input_Type = {
    /* The ob_type field must be initialized in the module init function
     * to be portable to Windows without using C++. */
    PyObject_HEAD_INIT(NULL)
    0,                      /*ob_size*/
    "mod_wsgi.Input",       /*tp_name*/
    sizeof(InputObject),    /*tp_basicsize*/
    0,                      /*tp_itemsize*/
    /* methods */
    (destructor)Input_dealloc, /*tp_dealloc*/
    0,                      /*tp_print*/
    0,                      /*tp_getattr*/
    0,                      /*tp_setattr*/
    0,                      /*tp_compare*/
    0,                      /*tp_repr*/
    0,                      /*tp_as_number*/
    0,                      /*tp_as_sequence*/
    0,                      /*tp_as_mapping*/
    0,                      /*tp_hash*/
    0,                      /*tp_call*/
    0,                      /*tp_str*/
    0,                      /*tp_getattro*/
    0,                      /*tp_setattro*/
    0,                      /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, /*tp_flags*/
    0,                      /*tp_doc*/
    0,                      /*tp_traverse*/
    0,                      /*tp_clear*/
    0,                      /*tp_richcompare*/
    0,                      /*tp_weaklistoffset*/
    (getiterfunc)Input_iter, /*tp_iter*/
    (iternextfunc)Input_iternext, /*tp_iternext*/
    Input_methods,          /*tp_methods*/
    0,                      /*tp_members*/
    0,                      /*tp_getset*/
    0,                      /*tp_base*/
    0,                      /*tp_dict*/
    0,                      /*tp_descr_get*/
    0,                      /*tp_descr_set*/
    0,                      /*tp_dictoffset*/
    0,                      /*tp_init*/
    0,                      /*tp_alloc*/
    0,                      /*tp_new*/
    0,                      /*tp_free*/
    0,                      /*tp_is_gc*/
};
+
/*
 * Adapter object mediating between an Apache request and the
 * WSGI application, implementing the start_response() callable
 * and the sending of response data.
 */

typedef struct {
    PyObject_HEAD
    request_rec *r;            /* Request being handled; NULL once expired. */
    WSGIRequestConfig *config; /* Per request module configuration. */
    InputObject *input;        /* Stream published as 'wsgi.input'. */
    LogObject *log;            /* Log object published as 'wsgi.errors'. */
    int status;                /* HTTP status code parsed from status line. */
    const char *status_line;   /* Status line given to start_response(). */
    PyObject *headers;         /* Response headers not yet sent, else NULL. */
    PyObject *sequence;        /* Iterable returned by the application. */
} AdapterObject;

static PyTypeObject Adapter_Type;
+
+static AdapterObject *newAdapterObject(request_rec *r)
+{
+ AdapterObject *self;
+
+ self = PyObject_New(AdapterObject, &Adapter_Type);
+ if (self == NULL)
+ return NULL;
+
+ self->r = r;
+
+ self->config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
+ &wsgi_module);
+
+ self->status = HTTP_INTERNAL_SERVER_ERROR;
+ self->status_line = NULL;
+ self->headers = NULL;
+ self->sequence = NULL;
+
+ self->input = newInputObject(r);
+ self->log = newLogObject(r, APLOG_ERR);
+
+ return self;
+}
+
+static void Adapter_dealloc(AdapterObject *self)
+{
+ Py_XDECREF(self->headers);
+ Py_XDECREF(self->sequence);
+
+ Py_DECREF(self->input);
+ Py_DECREF(self->log);
+
+ PyObject_Del(self);
+}
+
+static PyObject *Adapter_start(AdapterObject *self, PyObject *args)
+{
+ const char *status = NULL;
+ PyObject *headers = NULL;
+ PyObject *exc_info = NULL;
+
+ char* value = NULL;
+
+ if (!self->r) {
+ PyErr_SetString(PyExc_RuntimeError, "request object has expired");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, "sO|O:start_response",
+ &status, &headers, &exc_info)) {
+ return NULL;
+ }
+
+ if (!PyList_Check(headers)) {
+ PyErr_SetString(PyExc_TypeError, "response headers must be a list");
+ return NULL;
+ }
+
+ if (exc_info && exc_info != Py_None) {
+ if (self->status_line && !self->headers) {
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ if (!PyArg_ParseTuple(exc_info, "OOO", &type, &value, &traceback))
+ return NULL;
+
+ PyErr_Restore(type, value, traceback);
+
+ return NULL;
+ }
+ }
+ else if (self->status_line && !self->headers) {
+ PyErr_SetString(PyExc_RuntimeError, "headers have already been sent");
+ return NULL;
+ }
+
+ self->status_line = apr_pstrdup(self->r->pool, status);
+
+ value = ap_getword(self->r->pool, &status, ' ');
+
+ errno = 0;
+ self->status = strtol(value, &value, 10);
+
+ if (*value || errno == ERANGE) {
+ PyErr_SetString(PyExc_TypeError, "status value is not an integer");
+ return NULL;
+ }
+
+ if (!*status) {
+ PyErr_SetString(PyExc_ValueError, "status message was not supplied");
+ return NULL;
+ }
+
+ Py_XDECREF(self->headers);
+
+ self->headers = headers;
+
+ Py_INCREF(self->headers);
+
+ return PyObject_GetAttrString((PyObject *)self, "write");
+}
+
/*
 * Sends a block of response data for the request. On the first
 * call, while the response headers recorded by start_response()
 * are still pending, the status and headers are first
 * transferred into the Apache request structure and the headers
 * sent. A zero length may be used just to flush out the headers
 * for an otherwise empty response. Returns 1 on success, or 0
 * with a Python exception raised on error.
 */

static int Adapter_output(AdapterObject *self, const char *data, int length)
{
    int i = 0;

    if (!self->status_line) {
        PyErr_SetString(PyExc_RuntimeError, "response has not been started");
        return 0;
    }

    /* A non NULL headers member indicates headers not yet sent. */

    if (self->headers) {
        int set = 0;

        self->r->status = self->status;
        self->r->status_line = self->status_line;

        for (i = 0; i < PyList_Size(self->headers); i++) {
            PyObject *tuple = NULL;

            PyObject *object1 = NULL;
            PyObject *object2 = NULL;

            char *name = NULL;
            char *value = NULL;

            tuple = PyList_GetItem(self->headers, i);

            if (!PyTuple_Check(tuple)) {
                PyErr_Format(PyExc_TypeError, "list of tuple values "
                             "expected, value of type %.200s found",
                             tuple->ob_type->tp_name);
                return 0;
            }

            /*
             * NOTE(review): PyTuple_Size() returns Py_ssize_t
             * while the '%d' format expects int; these differ
             * on 64 bit platforms.
             */

            if (PyTuple_Size(tuple) != 2) {
                PyErr_Format(PyExc_ValueError, "tuple of length 2 "
                             "expected, length is %d",
                             PyTuple_Size(tuple));
                return 0;
            }

            object1 = PyTuple_GetItem(tuple, 0);
            object2 = PyTuple_GetItem(tuple, 1);

            if (!PyString_Check(object1)) {
                PyErr_SetString(PyExc_TypeError, "expected string object "
                                "for header name");
                return 0;
            }

            if (!PyString_Check(object2)) {
                PyErr_SetString(PyExc_TypeError, "expected string object "
                                "for header value");
                return 0;
            }

            if (!PyArg_ParseTuple(tuple, "ss", &name, &value)) {
                PyErr_SetString(PyExc_TypeError, "header name and value "
                                "must be string objects without null bytes");
                return 0;
            }

            if (!strcasecmp(name, "Content-Type")) {
#if AP_SERVER_MAJORVERSION_NUMBER < 2
                self->r->content_type = apr_pstrdup(self->r->pool, value);
#else
                /*
                 * In a daemon child process we cannot call the
                 * function ap_set_content_type() as want to
                 * avoid adding any output filters based on the
                 * type of file being served as this will be
                 * done in the main Apache child process which
                 * proxied the request to the daemon process.
                 */

                if (*self->config->process_group)
                    self->r->content_type = apr_pstrdup(self->r->pool, value);
                else
                    ap_set_content_type(self->r, value);
#endif
            }
            else if (!strcasecmp(name, "Content-Length")) {
                char *v = value;
                long l = 0;

                errno = 0;
                l = strtol(v, &v, 10);
                if (*v || errno == ERANGE) {
                    PyErr_SetString(PyExc_ValueError,
                                    "invalid content length");
                    return 0;
                }

                ap_set_content_length(self->r, l);

                set = 1;
            }
            else if (!strcasecmp(name, "WWW-Authenticate")) {
                /* Error headers survive internal redirects. */

                apr_table_set(self->r->err_headers_out, name, value);
            }
            else {
                apr_table_set(self->r->headers_out, name, value);
            }
        }

        /*
         * If content length not set and dealing with iterable
         * response from application, see if response is a
         * sequence consisting of only one item and if so use
         * the current length of data being output as the
         * content length to use.
         */

        if (!set && self->sequence) {
            if (PySequence_Check(self->sequence)) {
                if (PySequence_Size(self->sequence) == 1)
                    ap_set_content_length(self->r, length);

                if (PyErr_Occurred())
                    PyErr_Clear();
            }
        }

        ap_send_http_header(self->r);

        /* Discarding the headers marks them as having been sent. */

        Py_DECREF(self->headers);
        self->headers = NULL;
    }

    if (length) {
        ap_rwrite(data, length, self->r);
        if (!self->config->output_buffering)
            ap_rflush(self->r);
    }

    return 1;
}
+
#if AP_SERVER_MAJORVERSION_NUMBER >= 2
/*
 * Optional function exported by mod_ssl for testing whether a
 * connection is using SSL. NOTE(review): presumably resolved via
 * apr_retrieve_optional_fn() elsewhere in this file and NULL when
 * mod_ssl is not loaded — confirm against the rest of the source.
 */
APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *));
static APR_OPTIONAL_FN_TYPE(ssl_is_https) *wsgi_is_https = NULL;
#endif
+
/*
 * Builds the WSGI environ dictionary for the request from the
 * CGI variables already populated into r->subprocess_env, plus
 * the additional 'wsgi.*' variables required by the WSGI
 * specification. Returns a new reference to the dictionary.
 *
 * NOTE(review): the results of PyDict_New() and the string
 * creation functions are not checked for failure here; verify
 * whether callers could tolerate a NULL return before adding
 * such checks.
 */

static PyObject *Adapter_environ(AdapterObject *self)
{
    request_rec *r = NULL;

    PyObject *environ = NULL;
    PyObject *object = NULL;

    const apr_array_header_t *head = NULL;
    const apr_table_entry_t *elts = NULL;

    int i = 0;

    const char *scheme = NULL;

    /* Create the WSGI environment dictionary. */

    environ = PyDict_New();

    /* Merge the CGI environment into the WSGI environment. */

    r = self->r;

    head = apr_table_elts(r->subprocess_env);
    elts = (apr_table_entry_t *)head->elts;

    for (i = 0; i < head->nelts; ++i) {
        if (elts[i].key) {
            if (elts[i].val) {
                object = PyString_FromString(elts[i].val);
                PyDict_SetItemString(environ, elts[i].key, object);
                Py_DECREF(object);
            }
            else
                PyDict_SetItemString(environ, elts[i].key, Py_None);
        }
    }

    /* Now setup all the WSGI specific environment values. */

    object = Py_BuildValue("(ii)", 1, 0);
    PyDict_SetItemString(environ, "wsgi.version", object);
    Py_DECREF(object);

    object = PyBool_FromLong(wsgi_multithread);
    PyDict_SetItemString(environ, "wsgi.multithread", object);
    Py_DECREF(object);

    object = PyBool_FromLong(wsgi_multiprocess);
    PyDict_SetItemString(environ, "wsgi.multiprocess", object);
    Py_DECREF(object);

    PyDict_SetItemString(environ, "wsgi.run_once", Py_False);

    /* The CGI 'HTTPS' variable determines the URL scheme. */

    scheme = apr_table_get(r->subprocess_env, "HTTPS");

    if (scheme && (!strcasecmp(scheme, "On") || !strcmp(scheme, "1"))) {
        object = PyString_FromString("https");
        PyDict_SetItemString(environ, "wsgi.url_scheme", object);
        Py_DECREF(object);
    }
    else {
        object = PyString_FromString("http");
        PyDict_SetItemString(environ, "wsgi.url_scheme", object);
        Py_DECREF(object);
    }

    /*
     * Setup log object for WSGI errors. Don't decrement
     * reference to log object as keep reference to it.
     */

    object = (PyObject *)self->log;
    PyDict_SetItemString(environ, "wsgi.errors", object);

    /* Setup input object for request content. */

    object = (PyObject *)self->input;
    PyDict_SetItemString(environ, "wsgi.input", object);

    return environ;
}
+
+static int Adapter_run(AdapterObject *self, PyObject *object)
+{
+ int result = HTTP_INTERNAL_SERVER_ERROR;
+
+ PyObject *environ = NULL;
+ PyObject *start = NULL;
+ PyObject *args = NULL;
+ PyObject *iterator = NULL;
+ PyObject *close = NULL;
+
+ const char *msg = NULL;
+ int length = 0;
+
+ environ = Adapter_environ(self);
+
+ start = PyObject_GetAttrString((PyObject *)self, "start_response");
+
+ args = Py_BuildValue("(OO)", environ, start);
+
+ self->sequence = PyEval_CallObject(object, args);
+
+ if (self->sequence != NULL) {
+ iterator = PyObject_GetIter(self->sequence);
+
+ if (iterator != NULL) {
+ PyObject *item = NULL;
+
+ while ((item = PyIter_Next(iterator))) {
+ if (!PyString_Check(item)) {
+ PyErr_Format(PyExc_TypeError, "sequence of string "
+ "values expected, value of type %.200s found",
+ item->ob_type->tp_name);
+ break;
+ }
+
+ msg = PyString_AsString(item);
+ length = PyString_Size(item);
+
+ if (!msg || !Adapter_output(self, msg, length)) {
+ Py_DECREF(item);
+ break;
+ }
+
+ Py_DECREF(item);
+ }
+
+ if (!PyErr_Occurred()) {
+ if (Adapter_output(self, "", 0))
+ result = OK;
+ }
+
+ Py_DECREF(iterator);
+ }
+
+ if (PyErr_Occurred())
+ wsgi_log_python_error(self->r, self->log);
+
+ if (PyObject_HasAttrString(self->sequence, "close")) {
+ PyObject *args = NULL;
+ PyObject *data = NULL;
+
+ close = PyObject_GetAttrString(self->sequence, "close");
+
+ args = Py_BuildValue("()");
+ data = PyEval_CallObject(close, args);
+
+ Py_DECREF(args);
+ Py_XDECREF(data);
+ Py_DECREF(close);
+ }
+
+ if (PyErr_Occurred())
+ wsgi_log_python_error(self->r, self->log);
+
+ Py_DECREF(self->sequence);
+
+ self->sequence = NULL;
+ }
+
+ Py_DECREF(args);
+ Py_DECREF(start);
+ Py_DECREF(environ);
+
+ if (PyErr_Occurred())
+ wsgi_log_python_error(self->r, self->log);
+
+ /*
+ * If result indicates an internal server error, then
+ * replace the status line in the request object else
+ * that provided by the application will be what is used
+ * in any error page automatically generated by Apache.
+ */
+
+ if (result == HTTP_INTERNAL_SERVER_ERROR)
+ self->r->status_line = "500 Internal Server Error";
+
+ return result;
+}
+
+static PyObject *Adapter_write(AdapterObject *self, PyObject *args)
+{
+ PyObject *item = NULL;
+ const char *data = NULL;
+ int length = 0;
+
+ if (!self->r) {
+ PyErr_SetString(PyExc_RuntimeError, "request object has expired");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, "S:write", &item))
+ return NULL;
+
+ data = PyString_AsString(item);
+ length = PyString_Size(item);
+
+ if (!Adapter_output(self, data, length))
+ return NULL;
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
/* Method table for the adapter object. */

static PyMethodDef Adapter_methods[] = {
    { "start_response", (PyCFunction)Adapter_start, METH_VARARGS, 0},
    { "write",          (PyCFunction)Adapter_write, METH_VARARGS, 0},
    { NULL, NULL}
};
+
/* Type object for the WSGI request adapter object. */

static PyTypeObject Adapter_Type = {
    /* The ob_type field must be initialized in the module init function
     * to be portable to Windows without using C++. */
    PyObject_HEAD_INIT(NULL)
    0,                      /*ob_size*/
    "mod_wsgi.Adapter",     /*tp_name*/
    sizeof(AdapterObject),  /*tp_basicsize*/
    0,                      /*tp_itemsize*/
    /* methods */
    (destructor)Adapter_dealloc, /*tp_dealloc*/
    0,                      /*tp_print*/
    0,                      /*tp_getattr*/
    0,                      /*tp_setattr*/
    0,                      /*tp_compare*/
    0,                      /*tp_repr*/
    0,                      /*tp_as_number*/
    0,                      /*tp_as_sequence*/
    0,                      /*tp_as_mapping*/
    0,                      /*tp_hash*/
    0,                      /*tp_call*/
    0,                      /*tp_str*/
    0,                      /*tp_getattro*/
    0,                      /*tp_setattro*/
    0,                      /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,     /*tp_flags*/
    0,                      /*tp_doc*/
    0,                      /*tp_traverse*/
    0,                      /*tp_clear*/
    0,                      /*tp_richcompare*/
    0,                      /*tp_weaklistoffset*/
    0,                      /*tp_iter*/
    0,                      /*tp_iternext*/
    Adapter_methods,        /*tp_methods*/
    0,                      /*tp_members*/
    0,                      /*tp_getset*/
    0,                      /*tp_base*/
    0,                      /*tp_dict*/
    0,                      /*tp_descr_get*/
    0,                      /*tp_descr_set*/
    0,                      /*tp_dictoffset*/
    0,                      /*tp_init*/
    0,                      /*tp_alloc*/
    0,                      /*tp_new*/
    0,                      /*tp_free*/
    0,                      /*tp_is_gc*/
};
+
/* Restricted object to stop access to STDIN/STDOUT. */

typedef struct {
    PyObject_HEAD
    const char *s;  /* Label used in the error raised on access;
                     * not owned by this object. */
} RestrictedObject;

static PyTypeObject Restricted_Type;
+
+static RestrictedObject *newRestrictedObject(const char *s)
+{
+ RestrictedObject *self;
+
+ self = PyObject_New(RestrictedObject, &Restricted_Type);
+ if (self == NULL)
+ return NULL;
+
+ self->s = s;
+
+ return self;
+}
+
/*
 * Destructor for restricted objects. The label string is not
 * owned by the object so only the object itself is released.
 */

static void Restricted_dealloc(RestrictedObject *self)
{
    PyObject_Del(self);
}
+
+static PyObject *Restricted_getattr(RestrictedObject *self, char *name)
+{
+ PyErr_Format(PyExc_IOError, "%s access restricted by mod_wsgi", self->s);
+
+ return NULL;
+}
+
/* Type object for restricted stand ins for sys.stdin/sys.stdout. */

static PyTypeObject Restricted_Type = {
    /* The ob_type field must be initialized in the module init function
     * to be portable to Windows without using C++. */
    PyObject_HEAD_INIT(NULL)
    0,                      /*ob_size*/
    "mod_wsgi.Restricted",  /*tp_name*/
    sizeof(RestrictedObject), /*tp_basicsize*/
    0,                      /*tp_itemsize*/
    /* methods */
    (destructor)Restricted_dealloc, /*tp_dealloc*/
    0,                      /*tp_print*/
    (getattrfunc)Restricted_getattr, /*tp_getattr*/
    0,                      /*tp_setattr*/
    0,                      /*tp_compare*/
    0,                      /*tp_repr*/
    0,                      /*tp_as_number*/
    0,                      /*tp_as_sequence*/
    0,                      /*tp_as_mapping*/
    0,                      /*tp_hash*/
    0,                      /*tp_call*/
    0,                      /*tp_str*/
    0,                      /*tp_getattro*/
    0,                      /*tp_setattro*/
    0,                      /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,     /*tp_flags*/
    0,                      /*tp_doc*/
    0,                      /*tp_traverse*/
    0,                      /*tp_clear*/
    0,                      /*tp_richcompare*/
    0,                      /*tp_weaklistoffset*/
    0,                      /*tp_iter*/
    0,                      /*tp_iternext*/
    0,                      /*tp_methods*/
    0,                      /*tp_members*/
    0,                      /*tp_getset*/
    0,                      /*tp_base*/
    0,                      /*tp_dict*/
    0,                      /*tp_descr_get*/
    0,                      /*tp_descr_set*/
    0,                      /*tp_dictoffset*/
    0,                      /*tp_init*/
    0,                      /*tp_alloc*/
    0,                      /*tp_new*/
    0,                      /*tp_free*/
    0,                      /*tp_is_gc*/
};
+
+/* Function to restrict access to use of signal(). */
+
+static PyObject *wsgi_signal_intercept(PyObject *self, PyObject *args)
+{
+ PyObject *h = NULL;
+ int n = 0;
+
+ PyObject *m = NULL;
+
+ if (!PyArg_ParseTuple(args, "iO:signal", &n, &h))
+ return NULL;
+
+ ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server,
+ "mod_wsgi (pid=%d): Callback registration for "
+ "signal %d ignored.", getpid(), n);
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_stack");
+ if (o) {
+ PyObject *log = NULL;
+ PyObject *args = NULL;
+ PyObject *result = NULL;
+ Py_INCREF(o);
+ log = (PyObject *)newLogObject(NULL, APLOG_WARNING);
+ args = Py_BuildValue("(OOO)", Py_None, Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_XDECREF(result);
+ Py_DECREF(args);
+ Py_DECREF(log);
+ }
+ Py_DECREF(o);
+ }
+
+ Py_INCREF(m);
+
+ Py_INCREF(h);
+
+ return h;
+}
+
/* Method table used to override 'signal.signal()'. */

static PyMethodDef wsgi_signal_method[] = {
    { "signal", (PyCFunction)wsgi_signal_intercept, METH_VARARGS, 0 },
    { NULL, NULL }
};
+
/* Wrapper around Python interpreter instances. */

typedef struct {
    PyObject_HEAD
    char *name;                 /* Interpreter name; strdup()'d copy. */
    PyInterpreterState *interp; /* Underlying interpreter state. */
    int owner;                  /* Whether this object created the
                                 * interpreter, rather than attaching
                                 * to an existing one. */
} InterpreterObject;

static PyTypeObject Interpreter_Type;
+
+static InterpreterObject *newInterpreterObject(const char *name,
+ PyInterpreterState *interp)
+{
+ InterpreterObject *self;
+ PyThreadState *tstate = NULL;
+ PyThreadState *save_tstate = NULL;
+ PyObject *module = NULL;
+ PyObject *object = NULL;
+ PyObject *item = NULL;
+
+ self = PyObject_New(InterpreterObject, &Interpreter_Type);
+ if (self == NULL)
+ return NULL;
+
+ /* Remember active thread state so can restore it. */
+
+ save_tstate = PyThreadState_Swap(NULL);
+
+ /* Save away the interpreter name. */
+
+ self->name = strdup(name);
+
+ if (interp) {
+ /*
+ * Interpreter provided to us so will not be
+ * responsible for deleting it later.
+ */
+
+ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ "mod_wsgi (pid=%d): Attach interpreter '%s'.",
+ getpid(), name);
+
+ self->interp = interp;
+ self->owner = 0;
+
+ /*
+ * Need though now to create a thread state
+ * against the interpreter so we can preload
+ * it with our modules and fixups.
+ */
+
+ tstate = PyThreadState_New(self->interp);
+ PyThreadState_Swap(tstate);
+ }
+ else {
+ /*
+ * Create the interpreter. If creation of the
+ * interpreter fails it will restore the
+ * existing active thread state for us so don't
+ * need to worry about it in that case.
+ */
+
+ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ "mod_wsgi (pid=%d): Create interpreter '%s'.",
+ getpid(), name);
+
+ tstate = Py_NewInterpreter();
+
+ if (!tstate) {
+ PyErr_SetString(PyExc_RuntimeError, "Py_NewInterpreter() failed");
+
+ Py_DECREF(self);
+
+ return NULL;
+ }
+
+ self->interp = tstate->interp;
+ self->owner = 1;
+ }
+
+ /*
+ * Create mod_wsgi Python module. Only put the
+ * version in this for now. Might use it later
+ * on for interpreter specific information.
+ */
+
+ module = PyImport_AddModule("mod_wsgi");
+
+ PyModule_AddObject(module, "version", Py_BuildValue("(ii)",
+ MOD_WSGI_MAJORVERSION_NUMBER,
+ MOD_WSGI_MINORVERSION_NUMBER));
+
+ /*
+ * Install restricted objects for STDIN and STDOUT,
+ * or log object for STDOUT as appropriate. Don't do
+ * this if not running on Win32 and we believe we
+ * are running in single process mode, otherwise
+ * it prevents use of interactive debuggers such as
+ * the 'pdb' module.
+ */
+
+ object = (PyObject *)newLogObject(NULL, APLOG_ERR);
+ PySys_SetObject("stderr", object);
+ Py_DECREF(object);
+
+#ifndef WIN32
+ if (wsgi_parent_pid != getpid()) {
+#endif
+ if (wsgi_server_config->restrict_stdout != 0) {
+ object = (PyObject *)newRestrictedObject("sys.stdout");
+ PySys_SetObject("stdout", object);
+ Py_DECREF(object);
+ }
+ else {
+ object = (PyObject *)newLogObject(NULL, APLOG_ERR);
+ PySys_SetObject("stdout", object);
+ Py_DECREF(object);
+ }
+
+ if (wsgi_server_config->restrict_stdin != 0) {
+ object = (PyObject *)newRestrictedObject("sys.stdin");
+ PySys_SetObject("stdin", object);
+ Py_DECREF(object);
+ }
+#ifndef WIN32
+ }
+#endif
+
+ /*
+ * Set sys.argv to one element list to fake out
+ * modules that look there for Python command
+ * line arguments as appropriate.
+ */
+
+ object = PyList_New(0);
+ item = PyString_FromString("mod_wsgi");
+ PyList_Append(object, item);
+ PySys_SetObject("argv", object);
+ Py_DECREF(item);
+ Py_DECREF(object);
+
+ /*
+ * Install intercept for signal handler registration
+ * if appropriate.
+ */
+
+ if (wsgi_server_config->restrict_signal != 0) {
+ module = PyImport_ImportModule("signal");
+ PyModule_AddObject(module, "signal", PyCFunction_New(
+ &wsgi_signal_method[0], NULL));
+ Py_DECREF(module);
+ }
+
+ PyThreadState_Clear(tstate);
+ PyThreadState_Swap(save_tstate);
+ PyThreadState_Delete(tstate);
+
+ return self;
+}
+
+static void Interpreter_dealloc(InterpreterObject *self)
+{
+ PyThreadState *tstate = NULL;
+ PyObject *exitfunc = NULL;
+ PyObject *module = NULL;
+
+ /*
+ * We should always enter here with the Python GIL held, but
+ * there will be no active thread state. Note that it should
+ * be safe to always assume that the simplified GIL state
+ * API lock was originally unlocked as always calling in
+ * from an Apache thread outside of Python.
+ */
+
+ PyEval_ReleaseLock();
+
+ if (*self->name) {
+ tstate = PyThreadState_New(self->interp);
+ PyEval_AcquireThread(tstate);
+ }
+ else
+ PyGILState_Ensure();
+
+ if (self->owner) {
+ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ "mod_wsgi (pid=%d): Destroy interpreter '%s'.",
+ getpid(), self->name);
+ }
+ else {
+ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ "mod_wsgi (pid=%d): Cleanup interpreter '%s'.",
+ getpid(), self->name);
+ }
+
+ /*
+ * Because the thread state we are using was created outside
+ * of any Python code and is not the same as the Python main
+ * thread, there is no record of it within the 'threading'
+ * module. We thus need to call the 'currentThread()'
+ * function of the 'threading' module to force it to create
+ * a thread handle for the thread. If we do not do this,
+ * then the 'threading' modules exit function will always
+ * fail because it will not be able to find a handle for
+ * this thread.
+ */
+
+ module = PyImport_ImportModule("threading");
+
+ if (!module)
+ PyErr_Clear();
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *func = NULL;
+ PyObject *handle = NULL;
+
+ dict = PyModule_GetDict(module);
+ func = PyDict_GetItemString(dict, "currentThread");
+ if (func) {
+ PyObject *args = NULL;
+ PyObject *res = NULL;
+ Py_INCREF(func);
+ res = PyEval_CallObject(func, (PyObject *)NULL);
+ if (!res) {
+ PyErr_Clear();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(func);
+ }
+ }
+
+ /*
+ * In Python 2.5.1 an exit function is no longer used to
+ * shutdown and wait on non daemon threads which were created
+ * from Python code. Instead, in Py_Main() it explicitly
+ * calls 'threading._shutdown()'. Thus need to emulate this
+ * behaviour for those versions.
+ */
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *func = NULL;
+ PyObject *handle = NULL;
+
+ dict = PyModule_GetDict(module);
+ func = PyDict_GetItemString(dict, "_shutdown");
+ if (func) {
+ PyObject *args = NULL;
+ PyObject *res = NULL;
+ Py_INCREF(func);
+ res = PyEval_CallObject(func, (PyObject *)NULL);
+
+ if (res == NULL) {
+ PyObject *m = NULL;
+ PyObject *result = NULL;
+
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ "mod_wsgi (pid=%d): Exception occurred within "
+ "threading._shutdown().", getpid());
+
+ PyErr_Fetch(&type, &value, &traceback);
+ PyErr_NormalizeException(&type, &value, &traceback);
+
+ if (!value) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+
+ if (!traceback) {
+ traceback = Py_None;
+ Py_INCREF(traceback);
+ }
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_exception");
+ if (o) {
+ PyObject *log = NULL;
+ PyObject *args = NULL;
+ Py_INCREF(o);
+ log = (PyObject *)newLogObject(NULL, APLOG_ERR);
+ args = Py_BuildValue("(OOOOO)", type, value,
+ traceback, Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_DECREF(args);
+ Py_DECREF(log);
+ }
+ Py_DECREF(o);
+ }
+
+ if (!result) {
+ /*
+ * If can't output exception and traceback then
+ * use PyErr_Print to dump out details of the
+ * exception. For SystemExit though if we do
+ * that the process will actually be terminated
+ * so can only clear the exception information
+ * and keep going.
+ */
+
+ PyErr_Restore(type, value, traceback);
+
+ if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ PyErr_Print();
+ if (Py_FlushLine())
+ PyErr_Clear();
+ }
+ else {
+ PyErr_Clear();
+ }
+ }
+ else {
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(traceback);
+ }
+
+ Py_XDECREF(result);
+
+ Py_DECREF(m);
+ }
+
+ Py_XDECREF(res);
+ Py_DECREF(func);
+ }
+ }
+
+ /* Finally done with 'threading' module. */
+
+ if (module)
+ Py_DECREF(module);
+
+ /* Invoke exit functions by calling sys.exitfunc(). */
+
+ exitfunc = PySys_GetObject("exitfunc");
+
+ if (exitfunc) {
+ PyObject *res = NULL;
+ Py_INCREF(exitfunc);
+ PySys_SetObject("exitfunc", (PyObject *)NULL);
+ res = PyEval_CallObject(exitfunc, (PyObject *)NULL);
+
+ if (res == NULL) {
+ PyObject *m = NULL;
+ PyObject *result = NULL;
+
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ "mod_wsgi (pid=%d): SystemExit exception "
+ "raised by sys.exitfunc() ignored.", getpid());
+ }
+ else {
+ ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ "mod_wsgi (pid=%d): Exception occurred within "
+ "sys.exitfunc().", getpid());
+ }
+
+ PyErr_Fetch(&type, &value, &traceback);
+ PyErr_NormalizeException(&type, &value, &traceback);
+
+ if (!value) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+
+ if (!traceback) {
+ traceback = Py_None;
+ Py_INCREF(traceback);
+ }
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_exception");
+ if (o) {
+ PyObject *log = NULL;
+ PyObject *args = NULL;
+ Py_INCREF(o);
+ log = (PyObject *)newLogObject(NULL, APLOG_ERR);
+ args = Py_BuildValue("(OOOOO)", type, value,
+ traceback, Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_DECREF(args);
+ Py_DECREF(log);
+ }
+ Py_DECREF(o);
+ }
+
+ if (!result) {
+ /*
+ * If can't output exception and traceback then
+ * use PyErr_Print to dump out details of the
+ * exception. For SystemExit though if we do
+ * that the process will actually be terminated
+ * so can only clear the exception information
+ * and keep going.
+ */
+
+ PyErr_Restore(type, value, traceback);
+
+ if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ PyErr_Print();
+ if (Py_FlushLine())
+ PyErr_Clear();
+ }
+ else {
+ PyErr_Clear();
+ }
+ }
+ else {
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(traceback);
+ }
+
+ Py_XDECREF(result);
+
+ Py_DECREF(m);
+ }
+
+ Py_XDECREF(res);
+ Py_DECREF(exitfunc);
+ }
+
+ /* If we own it, we destroy it. */
+
+ if (!self->owner) {
+ if (*self->name) {
+ tstate = PyThreadState_Get();
+
+ PyThreadState_Clear(tstate);
+ PyEval_ReleaseThread(tstate);
+ PyThreadState_Delete(tstate);
+ }
+ else
+ PyGILState_Release(PyGILState_UNLOCKED);
+
+ PyEval_AcquireLock();
+ }
+ else
+ Py_EndInterpreter(tstate);
+
+ free(self->name);
+
+ PyObject_Del(self);
+}
+
/*
 * Type object for the interpreter wrapper. Only deallocation is
 * interesting; instances are never created or manipulated from
 * Python code itself.
 *
 * The ob_type field must be initialized in the module init function
 * to be portable to Windows without using C++.
 */
static PyTypeObject Interpreter_Type = {
    PyObject_HEAD_INIT(NULL)
    0,                      /*ob_size*/
    "mod_wsgi.Interpreter", /*tp_name*/
    sizeof(InterpreterObject), /*tp_basicsize*/
    0,                      /*tp_itemsize*/
    /* methods */
    (destructor)Interpreter_dealloc, /*tp_dealloc*/
    0,                      /*tp_print*/
    0,                      /*tp_getattr*/
    0,                      /*tp_setattr*/
    0,                      /*tp_compare*/
    0,                      /*tp_repr*/
    0,                      /*tp_as_number*/
    0,                      /*tp_as_sequence*/
    0,                      /*tp_as_mapping*/
    0,                      /*tp_hash*/
    0,                      /*tp_call*/
    0,                      /*tp_str*/
    0,                      /*tp_getattro*/
    0,                      /*tp_setattro*/
    0,                      /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT,     /*tp_flags*/
    0,                      /*tp_doc*/
    0,                      /*tp_traverse*/
    0,                      /*tp_clear*/
    0,                      /*tp_richcompare*/
    0,                      /*tp_weaklistoffset*/
    0,                      /*tp_iter*/
    0,                      /*tp_iternext*/
    0,                      /*tp_methods*/
    0,                      /*tp_members*/
    0,                      /*tp_getset*/
    0,                      /*tp_base*/
    0,                      /*tp_dict*/
    0,                      /*tp_descr_get*/
    0,                      /*tp_descr_set*/
    0,                      /*tp_dictoffset*/
    0,                      /*tp_init*/
    0,                      /*tp_alloc*/
    0,                      /*tp_new*/
    0,                      /*tp_free*/
    0,                      /*tp_is_gc*/
};
+
+/*
+ * Startup and shutdown of Python interpreter. In mod_wsgi if
+ * the Python interpreter hasn't been initialised by another
+ * Apache module such as mod_python, we will take control and
+ * initialise it. Need to remember that we initialised Python as
+ * in doing that we also take responsibility for performing
+ * special Python fixups after Apache is forked and child
+ * process has run.
+ */
+
/* Non zero once this module has itself initialised Python. */

static int wsgi_python_initialized = 0;

#if AP_SERVER_MAJORVERSION_NUMBER >= 2
/* Sub pool whose cleanup triggers Python shutdown on a restart. */
static apr_pool_t *wsgi_server_pool = NULL;
#endif
+
+static void wsgi_python_version(void)
+{
+ const char *compile = PY_VERSION;
+ const char *dynamic = 0;
+
+ dynamic = strtok((char *)Py_GetVersion(), " ");
+
+ if (strcmp(compile, dynamic) != 0) {
+ ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server,
+ "mod_wsgi: Compiled for Python/%s.", compile);
+ ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server,
+ "mod_wsgi: Runtime using Python/%s.", dynamic);
+ ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server,
+ "mod_wsgi: Python module path '%s'.",
+ Py_GetPath());
+ }
+}
+
/*
 * Pool cleanup which shuts down Python within this process. The
 * 'data' argument is unused. Always returns APR_SUCCESS.
 */

static apr_status_t wsgi_python_term(void *data)
{
    PyInterpreterState *interp = NULL;
    PyThreadState *tstate = NULL;

    ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
                 "mod_wsgi (pid=%d): Terminating Python.", getpid());

    PyEval_AcquireLock();

    /*
     * Walk to the end of the interpreter list. The head of the
     * list is the most recently created interpreter, so the last
     * entry is the main interpreter, which Py_Finalize() must be
     * run against.
     */

    interp = PyInterpreterState_Head();
    while (interp->next)
        interp = interp->next;

    /* Need a thread state for the main interpreter active when
     * calling Py_Finalize(). */

    tstate = PyThreadState_New(interp);
    PyThreadState_Swap(tstate);

    Py_Finalize();

    PyThreadState_Swap(NULL);

    PyEval_ReleaseLock();

    wsgi_python_initialized = 0;

    return APR_SUCCESS;
}
+
+static void wsgi_python_init(apr_pool_t *p)
+{
+ WSGIServerConfig *config = NULL;
+
+#if defined(DARWIN) && (AP_SERVER_MAJORVERSION_NUMBER < 2)
+ static int initialized = 0;
+#else
+ static int initialized = 1;
+#endif
+
+ /*
+ * Check that the version of Python found at
+ * runtime is what was used at compilation.
+ */
+
+ wsgi_python_version();
+
+ /* Perform initialisation if required. */
+
+ if (!Py_IsInitialized() || !initialized) {
+ char buffer[256];
+ const char *token = NULL;
+ const char *version = NULL;
+
+ /* Check for Python paths and optimisation flag. */
+
+ if (wsgi_server_config->python_optimize > 0)
+ Py_OptimizeFlag = wsgi_server_config->python_optimize;
+ else
+ Py_OptimizeFlag = 0;
+
+ if (wsgi_server_config->python_executable)
+ Py_SetProgramName((char *)wsgi_server_config->python_executable);
+
+ if (wsgi_server_config->python_home)
+ Py_SetPythonHome((char *)wsgi_server_config->python_home);
+
+#ifndef WIN32
+ if (wsgi_server_config->python_path) {
+ putenv(apr_psprintf(p, "PYTHONPATH=%s",
+ wsgi_server_config->python_path));
+ }
+#endif
+
+ /* Initialise Python. */
+
+ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ "mod_wsgi: Initializing Python.");
+
+ initialized = 1;
+
+ Py_Initialize();
+
+ /* Record version string with Apache. */
+
+ version = Py_GetVersion();
+
+ token = version;
+ while (*token && *token != ' ')
+ token++;
+
+ strcpy(buffer, "Python/");
+ strncat(buffer, version, token - version);
+
+#if AP_SERVER_MAJORVERSION_NUMBER < 2
+ ap_add_version_component(buffer);
+#else
+ ap_add_version_component(p, buffer);
+#endif
+
+ /* Initialise threading. */
+
+ PyEval_InitThreads();
+ PyEval_ReleaseLock();
+
+ PyThreadState_Swap(NULL);
+
+#if AP_SERVER_MAJORVERSION_NUMBER >= 2
+ /*
+ * Trigger destruction of the Python interpreter in the
+ * parent process on a restart. Can only do this with
+ * Apache 2.0 and later.
+ */
+
+ apr_pool_create(&wsgi_server_pool, p);
+ apr_pool_tag(wsgi_server_pool, "mod_wsgi server pool");
+
+ apr_pool_cleanup_register(wsgi_server_pool, NULL, wsgi_python_term,
+ apr_pool_cleanup_null);
+#endif
+
+ wsgi_python_initialized = 1;
+ }
+}
+
+/*
+ * Functions for acquiring and subsequently releasing desired
+ * Python interpreter instance. When acquiring the interpreter
+ * a new interpreter instance will be created on demand if it
+ * is required. The Python GIL will be held on return when the
+ * interpreter is acquired.
+ */
+
#if APR_HAS_THREADS
/* Serialises creation and lookup of entries in the interpreter table. */
static apr_thread_mutex_t* wsgi_interp_lock = NULL;
/* Serialises loading of WSGI script files as Python modules. */
static apr_thread_mutex_t* wsgi_module_lock = NULL;
#endif

/* Dictionary mapping interpreter name to InterpreterObject handle. */
static PyObject *wsgi_interpreters = NULL;
+
+static InterpreterObject *wsgi_acquire_interpreter(const char *name)
+{
+ PyThreadState *tstate = NULL;
+ PyInterpreterState *interp = NULL;
+ InterpreterObject *handle = NULL;
+
+ /*
+ * In a multithreaded MPM must protect the
+ * interpreters table. This lock is only needed to
+ * avoid a secondary thread coming in and creating
+ * the same interpreter if Python releases the GIL
+ * when an interpreter is being created. When
+ * are removing an interpreter from the table in
+ * preparation for reloading, don't need to have
+ * it.
+ */
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(wsgi_interp_lock);
+#endif
+
+ /*
+ * This function should never be called when the
+ * Python GIL is held, so need to acquire it.
+ */
+
+ PyEval_AcquireLock();
+
+ /*
+ * Check if already have interpreter instance and
+ * if not need to create one.
+ */
+
+ handle = (InterpreterObject *)PyDict_GetItemString(wsgi_interpreters,
+ name);
+
+ if (!handle) {
+ handle = newInterpreterObject(name, NULL);
+
+ if (!handle)
+ return NULL;
+
+ PyDict_SetItemString(wsgi_interpreters, name, (PyObject *)handle);
+ }
+ else
+ Py_INCREF(handle);
+
+ interp = handle->interp;
+
+ /*
+ * Create new thread state object. We should only be
+ * getting called where no current active thread
+ * state, so no need to remember the old one. When
+ * working with the main Python interpreter always
+ * use the simplified API for GIL locking so any
+ * extension modules which use that will still work.
+ */
+
+ PyEval_ReleaseLock();
+
+ if (*name) {
+ tstate = PyThreadState_New(interp);
+ PyEval_AcquireThread(tstate);
+ }
+ else
+ PyGILState_Ensure();
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(wsgi_interp_lock);
+#endif
+
+ return handle;
+}
+
/*
 * Remove the named interpreter from the interpreters table. The
 * interpreter itself is only destroyed once the last reference to
 * its handle is released. Assumes the caller holds the Python GIL
 * as required for dictionary manipulation.
 */

static void wsgi_remove_interpreter(const char *name)
{
    PyDict_DelItemString(wsgi_interpreters, name);
}
+
/*
 * Release an interpreter previously returned by
 * wsgi_acquire_interpreter(). Destroys the per request thread
 * state (or releases the simplified GIL state for the main
 * interpreter) and drops the reference to the handle, which may
 * trigger destruction of the interpreter if it has been removed
 * from the interpreters table. The GIL must be held on entry and
 * is not held on return.
 */

static void wsgi_release_interpreter(InterpreterObject *handle)
{
    PyThreadState *tstate = NULL;

    /*
     * Need to release and destroy the thread state that
     * was created against the interpreter. This will
     * release the GIL. Note that it should be safe to
     * always assume that the simplified GIL state API
     * lock was originally unlocked as always calling in
     * from an Apache thread when we acquire the
     * interpreter in the first place.
     */

    if (*handle->name) {
        tstate = PyThreadState_Get();

        PyThreadState_Clear(tstate);
        PyEval_ReleaseThread(tstate);
        PyThreadState_Delete(tstate);
    }
    else
        PyGILState_Release(PyGILState_UNLOCKED);

    /*
     * Need to reacquire the Python GIL just so we can
     * decrement our reference count to the interpreter
     * itself. If the interpreter has since been removed
     * from the table of interpreters this will result
     * in its destruction if its the last reference.
     */

    PyEval_AcquireLock();

    Py_DECREF(handle);

    PyEval_ReleaseLock();
}
+
+/*
+ * Code for importing a module from source by absolute path.
+ */
+
+static PyObject *wsgi_load_source(request_rec *r, const char *name, int found)
+{
+ WSGIRequestConfig *config = NULL;
+
+ FILE *fp = NULL;
+ PyObject *m = NULL;
+ PyObject *co = NULL;
+ struct _node *n = NULL;
+
+ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
+ &wsgi_module);
+
+ if (found) {
+ ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
+ "mod_wsgi (pid=%d, process='%s', application='%s'): "
+ "Reloading WSGI script '%s'.", getpid(),
+ config->process_group, config->application_group,
+ r->filename);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
+ "mod_wsgi (pid=%d, process='%s', application='%s'): "
+ "Loading WSGI script '%s'.", getpid(),
+ config->process_group, config->application_group,
+ r->filename);
+ }
+
+ if (!(fp = fopen(r->filename, "r"))) {
+ PyErr_SetFromErrno(PyExc_IOError);
+ return NULL;
+ }
+
+ n = PyParser_SimpleParseFile(fp, r->filename, Py_file_input);
+
+ fclose(fp);
+
+ if (!n)
+ return NULL;
+
+ co = (PyObject *)PyNode_Compile(n, r->filename);
+ PyNode_Free(n);
+
+ if (co)
+ m = PyImport_ExecCodeModuleEx((char *)name, co, r->filename);
+
+ Py_XDECREF(co);
+
+ if (m) {
+ PyObject *object = NULL;
+
+#if AP_SERVER_MAJORVERSION_NUMBER < 2
+ object = PyLong_FromLongLong(r->finfo.st_mtime);
+#else
+ object = PyLong_FromLongLong(r->finfo.mtime);
+#endif
+ PyModule_AddObject(m, "__mtime__", object);
+ }
+
+ return m;
+}
+
+static int wsgi_reload_required(request_rec *r, PyObject *module)
+{
+ PyObject *dict = NULL;
+ PyObject *object = NULL;
+ apr_time_t mtime = 0;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "__mtime__");
+
+ if (object) {
+ mtime = PyLong_AsLongLong(object);
+#if AP_SERVER_MAJORVERSION_NUMBER < 2
+ if (mtime != r->finfo.st_mtime)
+ return 1;
+#else
+ if (mtime != r->finfo.mtime)
+ return 1;
+#endif
+ }
+ else
+ return 1;
+
+ return 0;
+}
+
+static char *wsgi_module_name(request_rec *r)
+{
+ WSGIRequestConfig *config = NULL;
+
+ char *hash = NULL;
+ char *file = NULL;
+
+ /* Grab request configuration. */
+
+ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
+ &wsgi_module);
+
+ /*
+ * Calculate a name for the module using the MD5 of its full
+ * pathname. This is so that different code files with the
+ * same basename are still considered unique. Note that where
+ * we believe a case insensitive file system is being used,
+ * we always change the file name to lower case so that use
+ * of different case in name doesn't resultant in duplicate
+ * modules being loaded for the same file.
+ */
+
+ file = r->filename;
+
+ if (!config->case_sensitivity) {
+ file = apr_pstrdup(r->pool, file);
+ ap_str_tolower(file);
+ }
+
+ hash = ap_md5(r->pool, (const unsigned char *)file);
+ return apr_pstrcat(r->pool, "_mod_wsgi_", hash, NULL);
+}
+
/*
 * Main request handler body: acquire the configured interpreter,
 * load (or reload) the WSGI script as a Python module, look up the
 * configured callable within it and run it via an adapter object.
 * Returns an HTTP status code; exceptions raised along the way are
 * logged against the request. Entered without the GIL held.
 */

static int wsgi_execute_script(request_rec *r)
{
    WSGIRequestConfig *config = NULL;

    InterpreterObject *interp = NULL;
    PyObject *modules = NULL;
    PyObject *module = NULL;
    char *name = NULL;
    int found = 0;

    int status;

    /* Grab request configuration. */

    config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
                                                       &wsgi_module);

    /*
     * Acquire the desired python interpreter. Once this is done
     * it is safe to start manipulating python objects.
     */

    interp = wsgi_acquire_interpreter(config->application_group);

    if (!interp) {
        ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
                      "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
                      getpid(), config->application_group);

        if (Py_FlushLine())
            PyErr_Clear();

        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Calculate the Python module name to be used for script. */

    name = wsgi_module_name(r);

    /*
     * Use a lock around the check to see if the module is
     * already loaded and the import of the module to prevent
     * two request handlers trying to import the module at the
     * same time.
     */

#if APR_HAS_THREADS
    Py_BEGIN_ALLOW_THREADS
    apr_thread_mutex_lock(wsgi_module_lock);
    Py_END_ALLOW_THREADS
#endif

    modules = PyImport_GetModuleDict();
    module = PyDict_GetItemString(modules, name);

    /* PyDict_GetItemString() returns a borrowed reference, so
     * take our own reference for the life of the request. */

    Py_XINCREF(module);

    if (module)
        found = 1;

    /*
     * If script reloading is enabled and the module exists, see
     * if it has been modified since the last time it was
     * accessed. If it has, interpreter reloading is enabled
     * and it is not the main Python interpreter, we need to
     * trigger destruction of the interpreter by removing it
     * from the interpreters table, releasing it and then
     * reacquiring it. If just script reloading is enabled,
     * remove the module from the modules dictionary before
     * reloading it again. If code is executing within the
     * module at the time, the callers reference count on the
     * module should ensure it isn't actually destroyed until it
     * is finished.
     */

    if (module && config->script_reloading) {
        if (wsgi_reload_required(r, module)) {
            /* Discard reference to loaded module. */

            Py_DECREF(module);
            module = NULL;

            /* Check for interpreter or module reloading. */

            if (config->reload_mechanism == 1 && *config->application_group) {
                ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
                             "mod_wsgi (pid=%d): Force reload of "
                             "interpreter '%s'.", getpid(),
                             config->application_group);

                /* Remove interpreter from set of interpreters. */

                wsgi_remove_interpreter(config->application_group);

                /*
                 * Release the interpreter. If nothing else is
                 * making use of it, this will cause it to be
                 * destroyed immediately. If something was using
                 * it then it will hang around till the other
                 * handler has finished using it. This will
                 * leave us without even the Python GIL being
                 * locked.
                 */

                wsgi_release_interpreter(interp);

                /*
                 * Now reacquire the interpreter. Because we
                 * removed it from the interpreter set above,
                 * this will result in it being recreated. This
                 * also reacquires the Python GIL for us.
                 */

                interp = wsgi_acquire_interpreter(config->application_group);

                if (!interp) {
                    ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
                                 "mod_wsgi (pid=%d): Cannot acquire "
                                 "interpreter '%s'.", getpid(),
                                 config->application_group);

                    if (Py_FlushLine())
                        PyErr_Clear();

#if APR_HAS_THREADS
                    Py_BEGIN_ALLOW_THREADS
                    apr_thread_mutex_unlock(wsgi_module_lock);
                    Py_END_ALLOW_THREADS
#endif

                    return HTTP_INTERNAL_SERVER_ERROR;
                }

                found = 0;
            }
            else
                PyDict_DelItemString(modules, name);
        }
    }

    /* Load module if not already loaded. */

    if (!module)
        module = wsgi_load_source(r, name, found);

    /* Safe now to release the module lock. */

#if APR_HAS_THREADS
    Py_BEGIN_ALLOW_THREADS
    apr_thread_mutex_unlock(wsgi_module_lock);
    Py_END_ALLOW_THREADS
#endif

    /* Assume an internal server error unless everything okay. */

    status = HTTP_INTERNAL_SERVER_ERROR;

    /* Determine if script is executable and execute it. */

    if (module) {
        PyObject *module_dict = NULL;
        PyObject *object = NULL;

        module_dict = PyModule_GetDict(module);
        object = PyDict_GetItemString(module_dict, config->callable_object);

        if (object) {
            AdapterObject *adapter = NULL;
            adapter = newAdapterObject(r);

            Py_INCREF(object);

            if (adapter) {
                status = Adapter_run(adapter, object);

                /*
                 * Wipe out references to Apache request object
                 * held by Python objects, so can detect when an
                 * application holds on to the transient Python
                 * objects beyond the life of the request and
                 * thus raise an exception if they are used.
                 */

                adapter->r = NULL;
                adapter->input->r = NULL;
                adapter->log->expired = 1;
            }

            Py_XDECREF((PyObject *)adapter);

            Py_DECREF(object);
        }
        else {
            ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
                          "mod_wsgi (pid=%d): Target WSGI script '%s' does "
                          "not contain WSGI application '%s'.",
                          getpid(), r->filename, apr_pstrcat(r->pool,
                          r->filename, "::", config->callable_object, NULL));

            status = HTTP_NOT_FOUND;
        }
    }
    else {
        ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
                      "mod_wsgi (pid=%d): Target WSGI script '%s' cannot "
                      "be loaded as Python module.", getpid(), r->filename);
    }

    /* Log any details of exceptions if execution failed. */

    if (PyErr_Occurred()) {
        LogObject *log;
        log = newLogObject(r, APLOG_ERR);
        wsgi_log_python_error(r, log);
        Py_DECREF(log);
    }

    /* Cleanup and release interpreter, */

    Py_XDECREF(module);

    wsgi_release_interpreter(interp);

    return status;
}
+
+/*
+ * Apache child process initialision and cleanup. Initialise
+ * global table containing Python interpreter instances and
+ * cache reference to main interpreter. Also register cleanup
+ * function to delete interpreter on process shutdown.
+ */
+
+#if AP_SERVER_MAJORVERSION_NUMBER < 2
+static void wsgi_python_child_cleanup(void *data)
+#else
+static apr_status_t wsgi_python_child_cleanup(void *data)
+#endif
+{
+ PyObject *interp = NULL;
+
+ /* In a multithreaded MPM must protect table. */
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(wsgi_interp_lock);
+#endif
+
+ PyEval_AcquireLock();
+
+ /*
+ * Extract a handle to the main Python interpreter from
+ * interpreters dictionary as want to process that one last.
+ */
+
+ interp = PyDict_GetItemString(wsgi_interpreters, "");
+ Py_INCREF(interp);
+
+ /*
+ * Remove all items from interpreters dictionary. This will
+ * have side affect of calling any exit functions and
+ * destroying interpreters we own.
+ */
+
+ PyDict_Clear(wsgi_interpreters);
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(wsgi_interp_lock);
+#endif
+
+ /*
+ * Now we decrement reference on handle for main Python
+ * interpreter. This only causes exit functions to be called
+ * and doesn't result in interpreter being destroyed as we
+ * we didn't previously mark ourselves as the owner of the
+ * interpreter. Note that when Python as a whole is later
+ * being destroyed it will also call exit functions, but by
+ * then the exit function registrations have been removed
+ * and so they will not actually be run a second time.
+ */
+
+ Py_DECREF(interp);
+
+ PyEval_ReleaseLock();
+
+ /*
+ * Destroy Python itself including the main interpreter.
+ * If mod_python is being loaded it is left to mod_python to
+ * destroy mod_python, although it currently doesn't do so.
+ */
+
+ if (wsgi_python_initialized)
+ wsgi_python_term(0);
+
+#if AP_SERVER_MAJORVERSION_NUMBER >= 2
+ return APR_SUCCESS;
+#endif
+}
+
+static void wsgi_python_child_init(apr_pool_t *p)
+{
+ PyInterpreterState *interp = NULL;
+ PyThreadState *tstate = NULL;
+ PyThreadState *save_tstate = NULL;
+
+ PyObject *object = NULL;
+
+ /* Working with Python, so must acquire GIL. */
+
+ PyEval_AcquireLock();
+
+ /*
+ * Get a reference to the main Python interpreter created
+ * and associate our own thread state against it.
+ */
+
+ interp = PyInterpreterState_Head();
+ while (interp->next)
+ interp = interp->next;
+
+ tstate = PyThreadState_New(interp);
+ save_tstate = PyThreadState_Swap(tstate);
+
+ /*
+ * Trigger any special Python stuff required after a fork.
+ * Only do this though if we were responsible for the
+ * initialisation of the Python interpreter in the first
+ * place to avoid it being done multiple times.
+ */
+
+ if (wsgi_python_initialized)
+ PyOS_AfterFork();
+
+ /* Finalise any Python objects required by child process. */
+
+ PyType_Ready(&Log_Type);
+ PyType_Ready(&Input_Type);
+ PyType_Ready(&Adapter_Type);
+ PyType_Ready(&Restricted_Type);
+ PyType_Ready(&Interpreter_Type);
+
+ /* Initialise Python interpreter instance table and lock. */
+
+ wsgi_interpreters = PyDict_New();
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_create(&wsgi_interp_lock, APR_THREAD_MUTEX_UNNESTED, p);
+ apr_thread_mutex_create(&wsgi_module_lock, APR_THREAD_MUTEX_UNNESTED, p);
+#endif
+
+ /*
+ * Cache a reference to the first Python interpreter
+ * instance. This interpreter is special as some third party
+ * Python modules will only work when used from within this
+ * interpreter. This is generally when they use the Python
+ * simplified GIL API or otherwise don't use threading API
+ * properly.
+ */
+
+ object = (PyObject *)newInterpreterObject("", interp);
+ PyDict_SetItemString(wsgi_interpreters, "", object);
+ Py_DECREF(object);
+
+ /* Restore the prior thread state and release the GIL. */
+
+ PyThreadState_Clear(tstate);
+ PyThreadState_Swap(save_tstate);
+ PyThreadState_Delete(tstate);
+
+ PyEval_ReleaseLock();
+
+ /* Register cleanups to performed on process shutdown. */
+
+#if AP_SERVER_MAJORVERSION_NUMBER < 2
+ ap_register_cleanup(p, NULL, wsgi_python_child_cleanup,
+ ap_null_cleanup);
+#else
+ apr_pool_cleanup_register(p, NULL, wsgi_python_child_cleanup,
+ apr_pool_cleanup_null);
+#endif
+}
+
+/* The processors for directives. */
+
+static const char *wsgi_add_script_alias(cmd_parms *cmd, void *mconfig,
+ const char *l, const char *a)
+{
+ WSGIServerConfig *config = NULL;
+ WSGIAliasEntry *entry = NULL;
+
+ config = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+
+ if (!config->alias_list) {
+ config->alias_list = apr_array_make(config->pool, 20,
+ sizeof(WSGIAliasEntry));
+ }
+
+ entry = (WSGIAliasEntry *)apr_array_push(config->alias_list);
+
+ if (cmd->info) {
+ entry->regexp = ap_pregcomp(cmd->pool, l, AP_REG_EXTENDED);
+ if (!entry->regexp)
+ return "Regular expression could not be compiled.";
+ }
+
+ entry->location = l;
+ entry->application = a;
+
+ return NULL;
+}
+
+static const char *wsgi_set_python_optimize(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+ sconfig->python_optimize = atoi(f);
+
+ return NULL;
+}
+
+static const char *wsgi_set_python_executable(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+ sconfig->python_executable = f;
+
+ return NULL;
+}
+
+static const char *wsgi_set_python_home(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+ sconfig->python_home = f;
+
+ return NULL;
+}
+
+static const char *wsgi_set_python_path(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+ sconfig->python_path = f;
+
+ return NULL;
+}
+
+static const char *wsgi_set_restrict_stdin(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->restrict_stdin = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->restrict_stdin = 1;
+ else
+ return "WSGIRestrictStdin must be one of: Off | On";
+
+ return NULL;
+}
+
+static const char *wsgi_set_restrict_stdout(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->restrict_stdout = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->restrict_stdout = 1;
+ else
+ return "WSGIRestrictStdout must be one of: Off | On";
+
+ return NULL;
+}
+
+static const char *wsgi_set_restrict_signal(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->restrict_signal = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->restrict_signal = 1;
+ else
+ return "WSGIRestrictSignal must be one of: Off | On";
+
+ return NULL;
+}
+
+static const char *wsgi_set_restrict_process(cmd_parms *cmd, void *mconfig,
+ const char *args)
+{
+ apr_table_t *index = apr_table_make(cmd->pool, 5);
+
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+
+ dconfig->restrict_process = index;
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+
+ sconfig->restrict_process = index;
+ }
+
+ while (*args) {
+ char const *option;
+
+ option = ap_getword_conf(cmd->temp_pool, &args);
+
+ if (!strcmp(option, "%{GLOBAL}"))
+ option = "";
+
+ apr_table_setn(index, option, option);
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_process_group(cmd_parms *cmd, void *mconfig,
+ const char *n)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+ dconfig->process_group = n;
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+ sconfig->process_group = n;
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_application_group(cmd_parms *cmd, void *mconfig,
+ const char *n)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+ dconfig->application_group = n;
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+ sconfig->application_group = n;
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_callable_object(cmd_parms *cmd, void *mconfig,
+ const char *n)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+ dconfig->callable_object = n;
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+ sconfig->callable_object = n;
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_pass_authorization(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+
+ if (strcasecmp(f, "Off") == 0)
+ dconfig->pass_authorization = 0;
+ else if (strcasecmp(f, "On") == 0)
+ dconfig->pass_authorization = 1;
+ else
+ return "WSGIPassAuthorization must be one of: Off | On";
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->pass_authorization = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->pass_authorization = 1;
+ else
+ return "WSGIPassAuthorization must be one of: Off | On";
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_script_reloading(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+
+ if (strcasecmp(f, "Off") == 0)
+ dconfig->script_reloading = 0;
+ else if (strcasecmp(f, "On") == 0)
+ dconfig->script_reloading = 1;
+ else
+ return "WSGIScriptReloading must be one of: Off | On";
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->script_reloading = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->script_reloading = 1;
+ else
+ return "WSGIScriptReloading must be one of: Off | On";
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_reload_mechanism(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+
+ if (strcasecmp(f, "Module") == 0)
+ dconfig->reload_mechanism = 0;
+ else if (strcasecmp(f, "Interpreter") == 0)
+ dconfig->reload_mechanism = 1;
+ else
+ return "WSGIReloadMechanism must be one of: Module | Interpreter";
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+
+ if (strcasecmp(f, "Module") == 0)
+ sconfig->reload_mechanism = 0;
+ else if (strcasecmp(f, "Interpreter") == 0)
+ sconfig->reload_mechanism = 1;
+ else
+ return "WSGIReloadMechanism must be one of: Module | Interpreter";
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_output_buffering(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+
+ if (strcasecmp(f, "Off") == 0)
+ dconfig->output_buffering = 0;
+ else if (strcasecmp(f, "On") == 0)
+ dconfig->output_buffering = 1;
+ else
+ return "WSGIOutputBuffering must be one of: Off | On";
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->output_buffering = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->output_buffering = 1;
+ else
+ return "WSGIOutputBuffering must be one of: Off | On";
+ }
+
+ return NULL;
+}
+
+static const char *wsgi_set_case_sensitivity(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ if (cmd->path) {
+ WSGIDirectoryConfig *dconfig = NULL;
+ dconfig = (WSGIDirectoryConfig *)mconfig;
+
+ if (strcasecmp(f, "Off") == 0)
+ dconfig->case_sensitivity = 0;
+ else if (strcasecmp(f, "On") == 0)
+ dconfig->case_sensitivity = 1;
+ else
+ return "WSGICaseSensitivity must be one of: Off | On";
+ }
+ else {
+ WSGIServerConfig *sconfig = NULL;
+ sconfig = ap_get_module_config(cmd->server->module_config,
+ &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->case_sensitivity = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->case_sensitivity = 1;
+ else
+ return "WSGICaseSensitivity must be one of: Off | On";
+ }
+
+ return NULL;
+}
+
+/* Handler for the translate name phase. */
+
static int wsgi_alias_matches(const char *uri, const char *alias_fakename)
{
    /*
     * Based on the matching logic of the Apache mod_alias module.
     * Returns the number of characters of 'uri' consumed by the
     * alias (which can exceed the alias length because doubled
     * slashes in the URI are absorbed), or 0 on no match.
     */

    const char *a = alias_fakename;
    const char *u = uri;

    while (*a) {
        if (*a != '/') {
            /* Ordinary characters must match exactly. */
            if (*a++ != *u++)
                return 0;
            continue;
        }

        /*
         * A run of slashes in the alias matches any non-empty run
         * of slashes in the URI.
         */

        if (*u != '/')
            return 0;

        while (*a == '/')
            a++;
        while (*u == '/')
            u++;
    }

    /* The final alias component must not match a partial URI segment. */

    if (a[-1] != '/' && *u != '\0' && *u != '/')
        return 0;

    return u - uri;
}
+
static int wsgi_hook_intercept(request_rec *r)
{
    /*
     * Translate-name phase hook. Maps the request URI onto a target
     * WSGI script using the WSGIScriptAlias/WSGIScriptAliasMatch
     * entries recorded for this virtual host. On a match sets
     * r->filename and r->handler and returns OK, else DECLINED.
     */

    WSGIServerConfig *config = NULL;

    apr_array_header_t *aliases = NULL;

    WSGIAliasEntry *entries = NULL;
    WSGIAliasEntry *entry = NULL;

    ap_regmatch_t matches[AP_MAX_REG_MATCH];

    const char *location = NULL;
    const char *application = NULL;

    int i = 0;

    config = ap_get_module_config(r->server->module_config, &wsgi_module);

    /* Nothing to do if no aliases were configured. */

    if (!config->alias_list)
        return DECLINED;

    /* Only handle absolute URIs (or the empty URI). */

    if (r->uri[0] != '/' && r->uri[0])
        return DECLINED;

    aliases = config->alias_list;
    entries = (WSGIAliasEntry *)aliases->elts;

    /* Entries are tried in configuration order; first match wins. */

    for (i = 0; i < aliases->nelts; ++i) {
        int l = 0;

        entry = &entries[i];

        if (entry->regexp) {
            /* Regex variant: substitute captures into the target. */
            if (!ap_regexec(entry->regexp, r->uri, AP_MAX_REG_MATCH,
                matches, 0)) {
                if (entry->application) {
                    l = matches[0].rm_eo;

                    location = apr_pstrndup(r->pool, r->uri, l);
                    application = ap_pregsub(r->pool, entry->application,
                                             r->uri, AP_MAX_REG_MATCH,
                                             matches);
                }
            }
        }
        else if (entry->location) {
            /* Literal variant: segment-wise prefix match. */
            l = wsgi_alias_matches(r->uri, entry->location);

            location = entry->location;
            application = entry->application;
        }

        if (l > 0) {
            /* A root location maps the whole URI onto the target. */
            if (!strcmp(location, "/")) {
                r->filename = apr_pstrcat(r->pool, application,
                                          r->uri, NULL);
            }
            else {
                r->filename = apr_pstrcat(r->pool, application,
                                          r->uri + l, NULL);
            }

            /* Force our handler; note used by wsgi_is_script_aliased(). */

            r->handler = "wsgi-script";
            apr_table_setn(r->notes, "alias-forced-type", r->handler);

            return OK;
        }
    }

    return DECLINED;
}
+
+/* Handler for the response handler phase. */
+
+static void wsgi_log_script_error(request_rec *r, const char *e, const char *n)
+{
+ char *message = NULL;
+
+ if (!n)
+ n = r->filename;
+
+ message = apr_psprintf(r->pool, "%s: %s", e, n);
+ apr_table_set(r->notes, "error-notes", message);
+
+ ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, message);
+}
+
static void wsgi_build_environment(request_rec *r)
{
    /*
     * Populates r->subprocess_env with everything the WSGI request
     * will see: the standard CGI variables, an HTTPS flag, optional
     * authorisation header, normalised SCRIPT_NAME/PATH_INFO and
     * the internal mod_wsgi.* control settings.
     */

    WSGIRequestConfig *config = NULL;

    const char *value = NULL;
    const char *script_name = NULL;
    const char *path_info = NULL;

    conn_rec *c = r->connection;  /* Only used when daemons compiled in. */

    /* Grab request configuration. */

    config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
                                                       &wsgi_module);

    /* Populate environment with standard CGI variables. */

    ap_add_cgi_vars(r);
    ap_add_common_vars(r);

    /* Determine whether connection uses HTTPS protocol. */

#if AP_SERVER_MAJORVERSION_NUMBER >= 2
    /* Optional function from mod_ssl; resolved once and cached. */
    if (!wsgi_is_https)
        wsgi_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);

    if (wsgi_is_https && wsgi_is_https(r->connection))
        apr_table_set(r->subprocess_env, "HTTPS", "1");
#endif

    /*
     * If enabled, pass along authorisation headers which Apache
     * leaves out of CGI environment. WSGI still needs to see
     * these if it needs to implement any of the standard
     * authentication schemes such as Basic and Digest. We do
     * not pass these through by default though as it can result
     * in passwords being leaked though to a WSGI application
     * when it shouldn't. This would be a problem where there is
     * some sort of site wide authorisation scheme in place
     * which has got nothing to do with specific applications.
     */

    if (config->pass_authorization) {
        value = apr_table_get(r->headers_in, "Authorization");
        if (value)
            apr_table_setn(r->subprocess_env, "HTTP_AUTHORIZATION", value);
    }

    /* If PATH_INFO not set, set it to an empty string. */

    value = apr_table_get(r->subprocess_env, "PATH_INFO");
    if (!value)
        apr_table_setn(r->subprocess_env, "PATH_INFO", "");

    /*
     * Multiple slashes are not always collapsed into a single
     * slash in SCRIPT_NAME and PATH_INFO with Apache 1.3 and
     * Apache 2.X behaving a bit differently. Because some WSGI
     * applications don't deal with multiple slashes properly we
     * collapse any duplicate slashes to a single slash so
     * Apache behaviour is consistent across all versions. We
     * don't care that PATH_TRANSLATED can on Apache 1.3 still
     * contain multiple slashes as that should not be getting
     * used from a WSGI application anyway.
     */

    script_name = apr_table_get(r->subprocess_env, "SCRIPT_NAME");

    if (*script_name) {
        /* Skip redundant leading slashes (keeping one), then let
         * ap_no2slash() collapse any interior runs. */
        while (*script_name && (*(script_name+1) == '/'))
            script_name++;
        script_name = apr_pstrdup(r->pool, script_name);
        ap_no2slash((char*)script_name);
        apr_table_setn(r->subprocess_env, "SCRIPT_NAME", script_name);
    }

    path_info = apr_table_get(r->subprocess_env, "PATH_INFO");

    if (*path_info) {
        /* Same leading/interior slash normalisation as SCRIPT_NAME. */
        while (*path_info && (*(path_info+1) == '/'))
            path_info++;
        path_info = apr_pstrdup(r->pool, path_info);
        ap_no2slash((char*)path_info);
        apr_table_setn(r->subprocess_env, "PATH_INFO", path_info);
    }

    /*
     * Set values specific to mod_wsgi configuration. These control
     * aspects of how a request is managed but don't strictly need
     * to be passed through to the application itself. It is though
     * easier to set them here as then they are carried across to
     * the daemon process as part of the environment where they can
     * be extracted and used.
     */

    apr_table_setn(r->subprocess_env, "mod_wsgi.process_group",
                   config->process_group);
    apr_table_setn(r->subprocess_env, "mod_wsgi.application_group",
                   config->application_group);
    apr_table_setn(r->subprocess_env, "mod_wsgi.callable_object",
                   config->callable_object);

    apr_table_setn(r->subprocess_env, "mod_wsgi.script_reloading",
                   apr_psprintf(r->pool, "%d", config->script_reloading));
    apr_table_setn(r->subprocess_env, "mod_wsgi.reload_mechanism",
                   apr_psprintf(r->pool, "%d", config->reload_mechanism));
    apr_table_setn(r->subprocess_env, "mod_wsgi.output_buffering",
                   apr_psprintf(r->pool, "%d", config->output_buffering));
    apr_table_setn(r->subprocess_env, "mod_wsgi.case_sensitivity",
                   apr_psprintf(r->pool, "%d", config->case_sensitivity));

#if defined(MOD_WSGI_WITH_DAEMONS)
    /* Record the local (listener) address for daemon mode routing. */
    apr_table_setn(r->subprocess_env, "mod_wsgi.listener_host",
                   c->local_addr->hostname ? c->local_addr->hostname : "");
    apr_table_setn(r->subprocess_env, "mod_wsgi.listener_port",
                   apr_psprintf(r->pool, "%d", c->local_addr->port));
#endif
}
+
+static int wsgi_is_script_aliased(request_rec *r)
+{
+ const char *t = NULL;
+
+ t = apr_table_get(r->notes, "alias-forced-type");
+ return t && (!strcasecmp(t, "wsgi-script"));
+}
+
+#if !defined(WIN32)
+#if AP_SERVER_MAJORVERSION_NUMBER >= 2
+static int wsgi_execute_remote(request_rec *r);
+#endif
+#endif
+
static int wsgi_hook_handler(request_rec *r)
{
    /*
     * Response handler phase. Validates that the target WSGI script
     * may be executed, prepares the request configuration and CGI
     * style environment, then either proxies the request to a
     * daemon process (Apache 2.X only) or runs the script within
     * the Apache child process.
     */

    int status;

    WSGIRequestConfig *config = NULL;

    /*
     * Only process requests for this module. Honour a content
     * type here because mod_rewrite prior to Apache 2.2 only
     * provides a means of setting content type and doesn't
     * provide a means of setting the handler name explicitly.
     */

    if (!r->handler || (strcmp(r->handler, "wsgi-script") &&
        strcmp(r->handler, "application/x-httpd-wsgi"))) {
        return DECLINED;
    }

    /*
     * Ensure that have adequate privileges to run the WSGI
     * script. Require ExecCGI to be specified in Options for
     * this. In doing this, using the wider interpretation that
     * ExecCGI refers to any executable like script even though
     * not a separate process execution.
     */

    if (!(ap_allow_options(r) & OPT_EXECCGI) && !wsgi_is_script_aliased(r)) {
        wsgi_log_script_error(r, "Options ExecCGI is off in this directory",
                              r->filename);
        return HTTP_FORBIDDEN;
    }

    /* Ensure target script exists and is a file. */

#if AP_SERVER_MAJORVERSION_NUMBER < 2
    /* Apache 1.3 exposes a raw struct stat in r->finfo. */
    if (r->finfo.st_mode == 0) {
        wsgi_log_script_error(r, "Target WSGI script not found or unable "
                              "to stat", r->filename);
        return HTTP_NOT_FOUND;
    }
#else
    /* Apache 2.X exposes an apr_finfo_t in r->finfo. */
    if (r->finfo.filetype == 0) {
        wsgi_log_script_error(r, "Target WSGI script not found or unable "
                              "to stat", r->filename);
        return HTTP_NOT_FOUND;
    }
#endif

#if AP_SERVER_MAJORVERSION_NUMBER < 2
    if (S_ISDIR(r->finfo.st_mode)) {
        wsgi_log_script_error(r, "Attempt to invoke directory as WSGI "
                              "application", r->filename);
        return HTTP_FORBIDDEN;
    }
#else
    if (r->finfo.filetype == APR_DIR) {
        wsgi_log_script_error(r, "Attempt to invoke directory as WSGI "
                              "application", r->filename);
        return HTTP_FORBIDDEN;
    }
#endif

    /*
     * For Apache 2.0+ honour AcceptPathInfo directive. Default
     * behaviour is accept additional path information. Under
     * Apache 1.3, WSGI application would need to check itself.
     */

#if AP_MODULE_MAGIC_AT_LEAST(20011212,0)
    if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) &&
        r->path_info && *r->path_info) {
        wsgi_log_script_error(r, "AcceptPathInfo off disallows user's path",
                              r->filename);
        return HTTP_NOT_FOUND;
    }
#endif

    /*
     * Setup policy to apply if request contains a body. Note
     * that it is not possible to have chunked transfer encoding
     * for the request content. This is actually a limitation in
     * WSGI specification as it has no way of indicating that
     * there is content of unknown length, nor a way to deal
     * with trailers appearing after any chunked content.
     */

    status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR);

    if (status != OK)
        return status;

    /*
     * Construct request configuration and cache it in the
     * request object against this module so can access it
     * later from handler code.
     */

    config = wsgi_create_req_config(r->pool, r);

    ap_set_module_config(r->request_config, &wsgi_module, config);

    /* Build the sub process environment. */

    wsgi_build_environment(r);

    /*
     * Execute the target WSGI application script or proxy
     * request to one of the daemon processes as appropriate.
     */

#if AP_SERVER_MAJORVERSION_NUMBER >= 2
    /* Daemon execution returns DECLINED to fall back in-process. */
    status = wsgi_execute_remote(r);

    if (status != DECLINED)
        return status;
#endif

    return wsgi_execute_script(r);
}
+
+#if AP_SERVER_MAJORVERSION_NUMBER < 2
+
+/*
+ * Apache 1.3 module initialisation functions.
+ */
+
void wsgi_hook_init(server_rec *s, apr_pool_t *p)
{
    /*
     * Apache 1.3 module init hook. Advertises the module version,
     * records parent process/server details into module globals
     * and performs one-off Python interpreter initialisation.
     */

    char package[128];

    /* Setup module version information. */

    sprintf(package, "mod_wsgi/%d.%d-TRUNK", MOD_WSGI_MAJORVERSION_NUMBER,
            MOD_WSGI_MINORVERSION_NUMBER);

    ap_add_version_component(package);

    /* Retain reference to base server. */

    wsgi_server = s;

    /* Retain record of parent process ID. */

    wsgi_parent_pid = getpid();

    /* Determine whether multiprocess and/or multithreaded. */

    wsgi_multiprocess = 1;
    wsgi_multithread = 0;

    /* Retain reference to main server config. */

    wsgi_server_config = ap_get_module_config(s->module_config, &wsgi_module);

    /* Initialise Python if not already done. */

    wsgi_python_init(p);
}
+
/* Apache 1.3 child-init hook: per child Python initialisation. */

static void wsgi_hook_child_init(server_rec *s, apr_pool_t *p)
{
    wsgi_python_child_init(p);
}
+
/* Dispatch list of content handlers */
static const handler_rec wsgi_handlers[] = {
    { "wsgi-script", wsgi_hook_handler },                /* SetHandler name. */
    { "application/x-httpd-wsgi", wsgi_hook_handler },   /* Content type. */
    { NULL, NULL }
};
+
/* Apache 1.3 configuration directive table. */

static const command_rec wsgi_commands[] =
{
    /* URL to script mappings. */

    { "WSGIScriptAlias", wsgi_add_script_alias, NULL,
        RSRC_CONF, TAKE2, "Map location to target WSGI script file." },
    { "WSGIScriptAliasMatch", wsgi_add_script_alias, "*",
        RSRC_CONF, TAKE2, "Map location to target WSGI script file." },

    /* Python interpreter setup (global only). */

    { "WSGIPythonOptimize", wsgi_set_python_optimize, NULL,
        RSRC_CONF, TAKE1, "Set level of Python compiler optimisations." },
#ifndef WIN32
    { "WSGIPythonExecutable", wsgi_set_python_executable, NULL,
        RSRC_CONF, TAKE1, "Python executable absolute path name." },
    { "WSGIPythonHome", wsgi_set_python_home, NULL,
        RSRC_CONF, TAKE1, "Python prefix/exec_prefix absolute path names." },
    { "WSGIPythonPath", wsgi_set_python_path, NULL,
        RSRC_CONF, TAKE1, "Python module search path." },
#endif

    /* Restrictions applied within the interpreter. */

    { "WSGIRestrictStdin", wsgi_set_restrict_stdin, NULL,
        RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of STDIN." },
    { "WSGIRestrictStdout", wsgi_set_restrict_stdout, NULL,
        RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of STDOUT." },
    { "WSGIRestrictSignal", wsgi_set_restrict_signal, NULL,
        RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of signal()." },

    /* Application selection and behaviour. */

    { "WSGIApplicationGroup", wsgi_set_application_group, NULL,
        ACCESS_CONF|RSRC_CONF, TAKE1, "Name of WSGI application group." },
    { "WSGICallableObject", wsgi_set_callable_object, NULL,
        OR_FILEINFO, TAKE1, "Name of entry point in WSGI script file." },

    { "WSGIPassAuthorization", wsgi_set_pass_authorization, NULL,
        ACCESS_CONF|RSRC_CONF, TAKE1, "Enable/Disable WSGI authorization." },
    { "WSGIScriptReloading", wsgi_set_script_reloading, NULL,
        OR_FILEINFO, TAKE1, "Enable/Disable script reloading mechanism." },
    { "WSGIReloadMechanism", wsgi_set_reload_mechanism, NULL,
        OR_FILEINFO, TAKE1, "Defines what is reloaded when a reload occurs." },
    { "WSGIOutputBuffering", wsgi_set_output_buffering, NULL,
        OR_FILEINFO, TAKE1, "Enable/Disable buffering of response." },
    { "WSGICaseSensitivity", wsgi_set_case_sensitivity, NULL,
        OR_FILEINFO, TAKE1, "Define whether file system is case sensitive." },

    { NULL }
};
+
+/* Dispatch list for API hooks */
+
/* Apache 1.3 style module record (hook slots are positional). */

module MODULE_VAR_EXPORT wsgi_module = {
    STANDARD_MODULE_STUFF,
    wsgi_hook_init,            /* module initializer */
    wsgi_create_dir_config,    /* create per-dir config structures */
    wsgi_merge_dir_config,     /* merge per-dir config structures */
    wsgi_create_server_config, /* create per-server config structures */
    wsgi_merge_server_config,  /* merge per-server config structures */
    wsgi_commands,             /* table of config file commands */
    wsgi_handlers,             /* [#8] MIME-typed-dispatched handlers */
    wsgi_hook_intercept,       /* [#1] URI to filename translation */
    NULL,                      /* [#4] validate user id from request */
    NULL,                      /* [#5] check if the user is ok _here_ */
    NULL,                      /* [#3] check access by host address */
    NULL,                      /* [#6] determine MIME type */
    NULL,                      /* [#7] pre-run fixups */
    NULL,                      /* [#9] log a transaction */
    NULL,                      /* [#2] header parser */
    wsgi_hook_child_init,      /* child_init */
    NULL,                      /* child_exit */
    NULL                       /* [#0] post read-request */
#ifdef EAPI
    ,NULL,                     /* EAPI: add_module */
    NULL,                      /* EAPI: remove_module */
    NULL,                      /* EAPI: rewrite_command */
    NULL                       /* EAPI: new_connection */
#endif
};
+
+#else
+
+/*
+ * Apache 2.X and UNIX specific code for creation and management
+ * of distinct daemon processes.
+ */
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+
+#include "unixd.h"
+#include "scoreboard.h"
+#include "mpm_common.h"
+#include "apr_proc_mutex.h"
+#include "http_connection.h"
+#include "apr_buckets.h"
+#include "apr_poll.h"
+
+#if APR_HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#if APR_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_SEM_H
+#include <sys/sem.h>
+#endif
+
+#include <sys/un.h>
+
/* Queue length used for listen() on the daemon UNIX socket. */
#ifndef WSGI_LISTEN_BACKLOG
#define WSGI_LISTEN_BACKLOG 100
#endif

/* Number of attempts made to connect to a daemon process socket. */
#ifndef WSGI_CONNECT_ATTEMPTS
#define WSGI_CONNECT_ATTEMPTS 15
#endif
+
/* Static configuration and runtime state for one named daemon
 * process group defined by the WSGIDaemonProcess directive. */
typedef struct {
    server_rec *server;         /* Virtual host group was defined within. */
    long random;                /* Per group random value; presumably used
                                   to validate connections — TODO confirm. */
    int id;                     /* Sequential id, 1 based. */
    const char *name;           /* Unique group name. */
    const char *user;           /* User name daemons run as (user= option). */
    uid_t uid;                  /* Numeric uid daemons run as. */
    const char *group;          /* Group name daemons run as (group=). */
    gid_t gid;                  /* Numeric gid daemons run as. */
    int processes;              /* Daemon processes to start (processes=). */
    int multiprocess;           /* Non zero when processes= was supplied. */
    int threads;                /* Worker threads per process (threads=). */
    int umask;                  /* File mode mask, -1 when not specified. */
    const char *home;           /* Initial working directory, or NULL. */
    const char *socket;         /* Path of listener UNIX domain socket. */
    int listener_fd;            /* Raw fd of the listener socket. */
    const char* mutex_path;     /* File backing the accept mutex —
                                   NOTE(review): usage not in this view. */
    apr_proc_mutex_t* mutex;    /* Cross process accept mutex. */
} WSGIProcessGroup;
+
/* One running daemon process instance within a process group. */
typedef struct {
    WSGIProcessGroup *group;    /* Owning process group. */
    int instance;               /* Index within group; 1 denotes first. */
    apr_proc_t process;         /* APR handle for the child process. */
    apr_socket_t *listener;     /* APR wrapper around the listener fd. */
} WSGIDaemonProcess;
+
/* One worker thread within a daemon process. */
typedef struct {
    WSGIDaemonProcess *process; /* Daemon process the thread belongs to. */
    apr_thread_t *thread;       /* APR thread handle. */
    int running;                /* Non zero while thread is active —
                                   NOTE(review): updated outside this view. */
} WSGIDaemonThread;
+
/* Client side record for connecting to a daemon process socket. */
typedef struct {
    const char *name;           /* Daemon process group name. */
    const char *socket;         /* Path of the UNIX domain socket. */
    int fd;                     /* Connected socket descriptor. */
} WSGIDaemonSocket;
+
/* Pool of the Apache parent process; daemons are respawned from it. */
static apr_pool_t *wsgi_parent_pool = NULL;
/* Pool used within a daemon process — NOTE(review): created outside
 * this view; confirm lifetime. */
static apr_pool_t *wsgi_daemon_pool = NULL;

/* Count of daemon process groups defined so far. */
static int wsgi_daemon_count = 0;
/* Lookup tables keyed by group name — usage outside this view. */
static apr_hash_t *wsgi_daemon_index = NULL;
static apr_hash_t *wsgi_daemon_listeners = NULL;

/* Set within a daemon process to identify which daemon it is. */
static WSGIDaemonProcess *wsgi_daemon_process = NULL;

/* Incremented by signal handler to request daemon shutdown. */
static int wsgi_daemon_shutdown = 0;
+
+static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
+ const char *args)
+{
+ const char *error = NULL;
+ WSGIServerConfig *config = NULL;
+
+ const char *name = NULL;
+ const char *user = NULL;
+ const char *group = NULL;
+
+ int processes = 1;
+ int multiprocess = 0;
+ int threads = 15;
+ int umask = -1;
+
+ const char *home = NULL;
+
+ uid_t uid = unixd_config.user_id;
+ uid_t gid = unixd_config.group_id;
+
+ const char *option = NULL;
+ const char *value = NULL;
+
+ WSGIProcessGroup *entries = NULL;
+ WSGIProcessGroup *entry = NULL;
+
+ int i;
+
+ name = ap_getword_conf(cmd->temp_pool, &args);
+
+ if (!name || !*name)
+ return "Name of WSGI daemon process not supplied.";
+
+ while (*args) {
+ option = ap_getword_conf(cmd->temp_pool, &args);
+
+ if (strstr(option, "user=") == option) {
+ value = option + 5;
+ if (!*value)
+ return "Invalid user for WSGI daemon process.";
+
+ user = value;
+ uid = ap_uname2id(user);
+ if (uid == 0)
+ return "WSGI process blocked from running as root.";
+
+ if (*user == '#') {
+ struct passwd *entry = NULL;
+
+ if ((entry = getpwuid(uid)) == NULL)
+ return "Couldn't determine user name from uid.";
+
+ user = entry->pw_name;
+ }
+ }
+ else if (strstr(option, "group=") == option) {
+ value = option + 6;
+ if (!*value)
+ return "Invalid group for WSGI daemon process.";
+
+ group = value;
+ gid = ap_gname2id(group);
+ }
+ else if (strstr(option, "processes=") == option) {
+ value = option + 10;
+ if (!*value)
+ return "Invalid process count for WSGI daemon process.";
+
+ processes = atoi(value);
+ if (processes < 1)
+ return "Invalid process count for WSGI daemon process.";
+
+ multiprocess = 1;
+ }
+ else if (strstr(option, "threads=") == option) {
+ value = option + 8;
+ if (!*value)
+ return "Invalid thread count for WSGI daemon process.";
+
+ threads = atoi(value);
+ if (threads < 1)
+ return "Invalid thread count for WSGI daemon process.";
+ }
+ else if (strstr(option, "umask=") == option) {
+ value = option + 6;
+ if (!*value)
+ return "Invalid umask for WSGI daemon process.";
+
+ errno = 0;
+ umask = strtol(value, (char **)&value, 7);
+
+ if (*value || errno == ERANGE || umask < 0)
+ return "Invalid umask for WSGI daemon process.";
+ }
+ else if (strstr(option, "home=") == option) {
+ value = option + 5;
+ if (*value != '/')
+ return "Invalid home directory for WSGI daemon process.";
+
+ home = value;
+ }
+ else
+ return "Invalid option to WSGI daemon process definition.";
+ }
+
+ if (!wsgi_daemon_list) {
+ wsgi_daemon_list = apr_array_make(cmd->pool, 20,
+ sizeof(WSGIProcessGroup));
+ }
+
+ entries = (WSGIProcessGroup *)wsgi_daemon_list->elts;
+
+ for (i = 0; i < wsgi_daemon_list->nelts; ++i) {
+ entry = &entries[i];
+
+ if (!strcmp(entry->name, name))
+ return "Name duplicates previous WSGI daemon definition.";
+ }
+
+ wsgi_daemon_count++;
+
+ entry = (WSGIProcessGroup *)apr_array_push(wsgi_daemon_list);
+
+ entry->server = cmd->server;
+
+ entry->random = random();
+ entry->id = wsgi_daemon_count;
+
+ entry->name = apr_pstrdup(cmd->pool, name);
+ entry->user = apr_pstrdup(cmd->pool, user);
+ entry->group = apr_pstrdup(cmd->pool, group);
+
+ entry->uid = uid;
+ entry->gid = gid;
+
+ entry->processes = processes;
+ entry->multiprocess = multiprocess;
+ entry->threads = threads;
+
+ entry->umask = umask;
+ entry->home = home;
+
+ return NULL;
+}
+
+static const char *wsgi_set_socket_prefix(cmd_parms *cmd, void *mconfig,
+ const char *arg)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+
+ sconfig->socket_prefix = apr_psprintf(cmd->pool, "%s.%d", arg, getpid());
+ sconfig->socket_prefix = ap_server_root_relative(cmd->pool,
+ sconfig->socket_prefix);
+
+ if (!sconfig->socket_prefix) {
+ return apr_pstrcat(cmd->pool, "Invalid WSGISocketPrefix '",
+ arg, "'.", NULL);
+ }
+
+ return NULL;
+}
+
/*
 * Human readable list of the accept mutex mechanisms APR was
 * compiled with on this platform; used in the WSGIAcceptMutex
 * error message below.
 */
static const char wsgi_valid_accept_mutex_string[] =
    "Valid accept mutex mechanisms for this platform are: default"
#if APR_HAS_FLOCK_SERIALIZE
    ", flock"
#endif
#if APR_HAS_FCNTL_SERIALIZE
    ", fcntl"
#endif
#if APR_HAS_SYSVSEM_SERIALIZE
    ", sysvsem"
#endif
#if APR_HAS_POSIXSEM_SERIALIZE
    ", posixsem"
#endif
#if APR_HAS_PROC_PTHREAD_SERIALIZE
    ", pthread"
#endif
    ".";
+
static const char *wsgi_set_accept_mutex(cmd_parms *cmd, void *mconfig,
                                         const char *arg)
{
    /*
     * WSGIAcceptMutex: global only. Selects the APR process lock
     * mechanism used for the daemon accept mutex; only mechanisms
     * compiled into APR on this platform are accepted.
     */

    const char *error = NULL;
    WSGIServerConfig *sconfig = NULL;

    error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (error != NULL)
        return error;

    sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);

    /* Start from whatever Apache itself uses for its accept lock. */

    sconfig->lock_mechanism = ap_accept_lock_mech;

    if (!strcasecmp(arg, "default")) {
        sconfig->lock_mechanism = APR_LOCK_DEFAULT;
    }
#if APR_HAS_FLOCK_SERIALIZE
    else if (!strcasecmp(arg, "flock")) {
        sconfig->lock_mechanism = APR_LOCK_FLOCK;
    }
#endif
#if APR_HAS_FCNTL_SERIALIZE
    else if (!strcasecmp(arg, "fcntl")) {
        sconfig->lock_mechanism = APR_LOCK_FCNTL;
    }
#endif
#if APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM)
    else if (!strcasecmp(arg, "sysvsem")) {
        sconfig->lock_mechanism = APR_LOCK_SYSVSEM;
    }
#endif
#if APR_HAS_POSIXSEM_SERIALIZE
    else if (!strcasecmp(arg, "posixsem")) {
        sconfig->lock_mechanism = APR_LOCK_POSIXSEM;
    }
#endif
#if APR_HAS_PROC_PTHREAD_SERIALIZE
    else if (!strcasecmp(arg, "pthread")) {
        sconfig->lock_mechanism = APR_LOCK_PROC_PTHREAD;
    }
#endif
    else {
        return apr_pstrcat(cmd->pool, "Accept mutex lock mechanism '", arg,
                           "' is invalid. ", wsgi_valid_accept_mutex_string,
                           NULL);
    }

    return NULL;
}
+
/*
 * Signal handler installed in daemon processes: flags that shutdown
 * has been requested. NOTE(review): wsgi_daemon_shutdown is a plain
 * int rather than volatile sig_atomic_t — confirm this is safe on
 * all supported platforms.
 */
static void wsgi_signal_handler(int signum)
{
    wsgi_daemon_shutdown++;
}
+
+static int wsgi_check_signal(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ wsgi_daemon_shutdown++;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon);
+
static void wsgi_manage_process(int reason, void *data, apr_wait_t status)
{
    /*
     * "Other child" maintenance callback registered against each
     * daemon process: restarts daemons which die unexpectedly and
     * cleans up the listener socket on Apache restart/shutdown.
     */

    WSGIDaemonProcess *daemon = data;

    switch (reason) {

        /* Child daemon process has died. */

        case APR_OC_REASON_DEATH: {
            int mpm_state;
            int stopping;

            /* Stop watching the existing process. */

            apr_proc_other_child_unregister(daemon);

            /*
             * Determine if Apache is being shutdown or not and
             * if it is not being shutdown, restart the child
             * daemon process that has died. If MPM doesn't
             * support query assume that child daemon process
             * shouldn't be restarted. Both prefork and worker
             * MPMs support this query so should always be okay.
             */

            stopping = 1;

            if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state) == APR_SUCCESS
                && mpm_state != AP_MPMQ_STOPPING) {
                stopping = 0;
            }

            if (!stopping) {
                ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0),
                             wsgi_server, "mod_wsgi (pid=%d): "
                             "Process '%s' has died, restarting.",
                             daemon->process.pid, daemon->group->name);

                wsgi_start_process(wsgi_parent_pool, daemon);
            }

            break;
        }

        /* Apache is being restarted or shutdown. */

        case APR_OC_REASON_RESTART: {

            /* Stop watching the existing process. */

            apr_proc_other_child_unregister(daemon);

            /*
             * Remove socket used for communicating with daemon
             * when the process to be notified is the first in
             * the process group.
             */

            if (daemon->instance == 1) {
                if (close(daemon->group->listener_fd) < 0) {
                    ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno),
                                 wsgi_server, "mod_wsgi (pid=%d): "
                                 "Couldn't close unix domain socket '%s'.",
                                 getpid(), daemon->group->socket);
                }

                /* ENOENT simply means another party removed it first. */

                if (unlink(daemon->group->socket) < 0 && errno != ENOENT) {
                    ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno),
                                 wsgi_server, "mod_wsgi (pid=%d): "
                                 "Couldn't unlink unix domain socket '%s'.",
                                 getpid(), daemon->group->socket);
                }
            }

            break;
        }

        /* Child daemon process vanished. */

        case APR_OC_REASON_LOST: {

            /* Stop watching the existing process. */

            apr_proc_other_child_unregister(daemon);

            /* Restart the child daemon process that has died. */

            ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0),
                         wsgi_server, "mod_wsgi (pid=%d): "
                         "Process '%s' has died, restarting.",
                         daemon->process.pid, daemon->group->name);

            wsgi_start_process(wsgi_parent_pool, daemon);

            break;
        }

        /* Call to unregister the process. */

        case APR_OC_REASON_UNREGISTER: {

            /* Nothing to do at present. */

            break;
        }
    }
}
+
+/*
+ * Apply the per process group settings to the current (daemon child)
+ * process: umask, working directory and, when running as root, the
+ * real/effective group and user. Called straight after the fork.
+ *
+ * NOTE(review): failures to change group/user are logged but do not
+ * abort, so on error the daemon keeps running with the privileges it
+ * already had (root) -- confirm whether that is intended.
+ */
+static void wsgi_setup_access(WSGIDaemonProcess *daemon)
+{
+    /* Setup the umask for the effective user. */
+
+    if (daemon->group->umask != -1)
+        umask(daemon->group->umask);
+
+    /* Setup the working directory. */
+
+    if (daemon->group->home) {
+        if (chdir(daemon->group->home) == -1) {
+            ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                         "mod_wsgi (pid=%d): Unable to change working "
+                         "directory to '%s'.", getpid(), daemon->group->home);
+        }
+    }
+
+    /* Don't bother switch user/group if not root. */
+
+    if (geteuid())
+        return;
+
+    /* Setup the daemon process real and effective group. */
+
+    if (setgid(daemon->group->gid) == -1) {
+        /* Cast gid_t explicitly so the format specifier always matches. */
+        ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                     "mod_wsgi (pid=%d): Unable to set group id to gid=%ld.",
+                     getpid(), (long)daemon->group->gid);
+    }
+    else {
+        if (initgroups(daemon->group->user, daemon->group->gid) == -1) {
+            ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno),
+                         wsgi_server, "mod_wsgi (pid=%d): Unable "
+                         "to set groups for uname=%s and gid=%u.", getpid(),
+                         daemon->group->user, (unsigned)daemon->group->gid);
+        }
+    }
+
+    /* Setup the daemon process real and effective user. */
+
+    if (setuid(daemon->group->uid) == -1) {
+        ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                     "mod_wsgi (pid=%d): Unable to change to uid=%ld.",
+                     getpid(), (long)daemon->group->uid);
+    }
+}
+
+/*
+ * Create, bind and listen on the UNIX domain socket over which the
+ * Apache child processes proxy requests to this daemon process
+ * group. When running as root the socket file ownership is changed
+ * to the configured Apache child user so child processes can connect
+ * to it. Returns the listening descriptor, or -1 on failure. The
+ * descriptor is now closed on every error path so a failure no
+ * longer leaks the socket.
+ */
+static int wsgi_setup_socket(WSGIProcessGroup *process)
+{
+    int sockfd = -1;
+    struct sockaddr_un addr;
+    mode_t omask;
+    int rc;
+
+    ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+                 "mod_wsgi (pid=%d): Socket for '%s' is '%s'.",
+                 getpid(), process->name, process->socket);
+
+    if ((sockfd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
+        ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                     "mod_wsgi (pid=%d): Couldn't create unix domain "
+                     "socket.", getpid());
+        return -1;
+    }
+
+    memset(&addr, 0, sizeof(addr));
+    addr.sun_family = AF_UNIX;
+    apr_cpystrn(addr.sun_path, process->socket, sizeof(addr.sun_path));
+
+    /* Restrict permissions on the socket file while it is created. */
+
+    omask = umask(0077);
+    rc = bind(sockfd, (struct sockaddr *)&addr, sizeof(addr));
+    umask(omask);
+    if (rc < 0) {
+        ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                     "mod_wsgi (pid=%d): Couldn't bind unix domain "
+                     "socket '%s'.", getpid(), process->socket);
+        close(sockfd);
+        return -1;
+    }
+
+    if (listen(sockfd, WSGI_LISTEN_BACKLOG) < 0) {
+        ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                     "mod_wsgi (pid=%d): Couldn't listen on unix domain "
+                     "socket.", getpid());
+        close(sockfd);
+        return -1;
+    }
+
+    /*
+     * Hand ownership of the socket file to the Apache child user so
+     * that the (non root) child processes can connect to it.
+     */
+
+    if (!geteuid()) {
+        if (chown(process->socket, unixd_config.user_id, -1) < 0) {
+            ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                         "mod_wsgi (pid=%d): Couldn't change owner of unix "
+                         "domain socket '%s'.", getpid(),
+                         process->socket);
+            close(sockfd);
+            return -1;
+        }
+    }
+
+    return sockfd;
+}
+
+/*
+ * Hand a connection accepted from an Apache child process over to
+ * the standard Apache connection processing machinery, using the
+ * server object of the daemon's process group, then linger close it.
+ */
+static void wsgi_process_socket(apr_pool_t *p, apr_socket_t *sock,
+                                apr_bucket_alloc_t *bucket_alloc,
+                                WSGIDaemonProcess *daemon)
+{
+    ap_sb_handle_t *sbh;
+    conn_rec *c;
+
+    ap_create_sb_handle(&sbh, p, -1, 0);
+
+    c = ap_run_create_connection(p, daemon->group->server, sock, 1,
+                                 sbh, bucket_alloc);
+
+    if (!c)
+        return;
+
+    ap_process_connection(c, sock);
+    ap_lingering_close(c);
+}
+
+/*
+ * Main loop run by each worker thread of a daemon process. Each
+ * iteration optionally acquires the cross process accept mutex,
+ * polls the listener descriptor until it is readable, accepts one
+ * proxied connection from an Apache child process and processes the
+ * request on it. Loops until the process wide shutdown flag is set.
+ */
+static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
+{
+    apr_status_t status;
+    apr_socket_t *socket;
+
+    apr_pool_t *ptrans;
+
+    apr_pollset_t *pollset;
+    apr_pollfd_t pfd = { 0 };
+    apr_int32_t numdesc;
+    const apr_pollfd_t *pdesc;
+
+    apr_bucket_alloc_t *bucket_alloc;
+
+    WSGIDaemonProcess *daemon = thread->process;
+    WSGIProcessGroup *group = daemon->group;
+
+    /* Loop until signal received to shutdown daemon process. */
+
+    while (!wsgi_daemon_shutdown) {
+        apr_status_t rv;
+
+        if (group->mutex) {
+            /*
+             * Grab the accept mutex across all daemon processes
+             * in this process group.
+             */
+
+            rv = apr_proc_mutex_lock(group->mutex);
+
+            if (rv != APR_SUCCESS) {
+#if defined(EIDRM)
+                /*
+                 * When using multiple threads locking the
+                 * process accept mutex fails with an EIDRM when
+                 * process being shutdown but signal check
+                 * hasn't triggered quick enough to set shutdown
+                 * flag. This causes lots of error messages to
+                 * be logged which make it look like something
+                 * nasty has happened even when it hasn't. For
+                 * now assume that if multiple threads and EIDRM
+                 * occurs that it is okay and the process is
+                 * being shutdown. The condition should by
+                 * rights only occur when the Apache parent
+                 * process is being shutdown or has died for
+                 * some reason so daemon process would logically
+                 * therefore also be in process of being
+                 * shutdown or killed.
+                 */
+                if (!strcmp(apr_proc_mutex_name(group->mutex), "sysvsem")) {
+                    if (errno == EIDRM && group->threads > 1)
+                        wsgi_daemon_shutdown = 1;
+                }
+#endif
+
+                if (!wsgi_daemon_shutdown) {
+                    ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv),
+                                 wsgi_server, "mod_wsgi (pid=%d): "
+                                 "Couldn't acquire accept mutex '%s'. "
+                                 "Shutting down daemon process.",
+                                 getpid(), group->socket);
+
+                    kill(getpid(), SIGTERM);
+                    sleep(5);
+                }
+
+                break;
+            }
+
+            /*
+             * Daemon process being shutdown so don't accept the
+             * connection after all.
+             */
+
+            if (wsgi_daemon_shutdown) {
+                apr_proc_mutex_unlock(group->mutex);
+
+                break;
+            }
+        }
+
+        apr_pool_create(&ptrans, p);
+
+        /*
+         * Accept socket connection from the child process. We
+         * test the socket for whether it is ready before actually
+         * performing the accept() so that can know for sure that
+         * we will be processing a request and flag thread as
+         * running. Only bother to do join with thread which is
+         * actually running when process is being shutdown.
+         */
+
+        apr_pollset_create(&pollset, 1, ptrans, 0);
+
+        memset(&pfd, '\0', sizeof(pfd));
+        pfd.desc_type = APR_POLL_SOCKET;
+        pfd.desc.s = daemon->listener;
+        pfd.reqevents = APR_POLLIN;
+        pfd.client_data = daemon;
+
+        apr_pollset_add(pollset, &pfd);
+
+        while (!wsgi_daemon_shutdown) {
+            rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
+
+            if (rv == APR_SUCCESS)
+                break;
+
+            if (APR_STATUS_IS_EINTR(rv))
+                break;
+
+            if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) {
+                ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv),
+                             wsgi_server, "mod_wsgi (pid=%d): "
+                             "Unable to poll daemon socket for '%s'. "
+                             "Shutting down daemon process.",
+                             getpid(), group->socket);
+
+                kill(getpid(), SIGTERM);
+                sleep(5);
+            }
+        }
+
+        if (wsgi_daemon_shutdown) {
+            if (group->mutex)
+                apr_proc_mutex_unlock(group->mutex);
+
+            apr_pool_destroy(ptrans);
+
+            break;
+        }
+
+        if (rv != APR_SUCCESS && APR_STATUS_IS_EINTR(rv)) {
+            if (group->mutex)
+                apr_proc_mutex_unlock(group->mutex);
+
+            apr_pool_destroy(ptrans);
+
+            continue;
+        }
+
+        thread->running = 1;
+
+        status = apr_socket_accept(&socket, daemon->listener, ptrans);
+
+        if (group->mutex) {
+            apr_status_t rv;
+            rv = apr_proc_mutex_unlock(group->mutex);
+            if (rv != APR_SUCCESS) {
+                if (!wsgi_daemon_shutdown) {
+                    ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv),
+                                 wsgi_server, "mod_wsgi (pid=%d): "
+                                 "Couldn't release accept mutex '%s'.",
+                                 getpid(), group->socket);
+                    apr_pool_destroy(ptrans);
+                    thread->running = 0;
+
+                    break;
+                }
+            }
+        }
+
+        /*
+         * Skip the connection on any accept() failure. Previously
+         * only an interrupted system call was handled here and any
+         * other failure fell through, causing an invalid socket to
+         * be processed. Unexpected errors are now logged as well.
+         */
+
+        if (status != APR_SUCCESS) {
+            if (!APR_STATUS_IS_EINTR(status)) {
+                ap_log_error(APLOG_MARK, WSGI_LOG_ERR(status),
+                             wsgi_server, "mod_wsgi (pid=%d): "
+                             "Couldn't accept connection from client.",
+                             getpid());
+            }
+
+            apr_pool_destroy(ptrans);
+            thread->running = 0;
+
+            continue;
+        }
+
+        /* Process the request proxied from the child process. */
+
+        bucket_alloc = apr_bucket_alloc_create(ptrans);
+        wsgi_process_socket(ptrans, socket, bucket_alloc, daemon);
+
+        /* Cleanup ready for next request. */
+
+        apr_pool_destroy(ptrans);
+
+        thread->running = 0;
+    }
+}
+
+/*
+ * Thread entry point for worker threads of a multithreaded daemon
+ * process: runs the worker loop and then terminates the thread.
+ */
+static void *wsgi_daemon_thread(apr_thread_t *thd, void *data)
+{
+    WSGIDaemonThread *self = data;
+
+    wsgi_daemon_worker(apr_thread_pool_get(thd), self);
+
+    apr_thread_exit(thd, APR_SUCCESS);
+
+    return NULL;
+}
+
+/*
+ * Top level routine of a daemon process after fork and setup. For a
+ * single threaded process group the worker loop is run inline; for a
+ * multithreaded group the configured number of joinable worker
+ * threads are started, the routine blocks waiting for a shutdown
+ * signal, and then joins with any threads still processing requests.
+ * Log messages now consistently report 1-based thread numbers.
+ */
+static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
+{
+    /*
+     * If process running in single threaded mode only need run
+     * the main worker function. If multiple threads required
+     * then startup all the threads and wait for them to exit
+     * when shutdown is signaled.
+     */
+
+    if (daemon->group->threads == 1) {
+        WSGIDaemonThread thread;
+
+        thread.process = daemon;
+        thread.thread = NULL;
+        thread.running = 0;
+
+        wsgi_daemon_worker(p, &thread);
+    }
+    else {
+        WSGIDaemonThread *threads;
+        apr_threadattr_t *thread_attr;
+
+        int i;
+        apr_status_t rv;
+        apr_status_t thread_rv;
+
+        /* Block all signals from being received. */
+
+        rv = apr_setup_signal_thread();
+        if (rv != APR_SUCCESS) {
+            ap_log_error(APLOG_MARK, WSGI_LOG_EMERG(rv), wsgi_server,
+                         "mod_wsgi (pid=%d): Couldn't initialise signal "
+                         "thread in daemon process '%s'.", getpid(),
+                         daemon->group->name);
+
+            /* Don't die immediately to avoid a fork bomb. */
+
+            sleep(20);
+
+            return;
+        }
+
+        /* Ensure that threads are joinable. */
+
+        apr_threadattr_create(&thread_attr, p);
+        apr_threadattr_detach_set(thread_attr, 0);
+
+        /* Start the required number of threads. */
+
+        threads = (WSGIDaemonThread *)apr_pcalloc(p, daemon->group->threads
+                                                  * sizeof(WSGIDaemonThread));
+
+        ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+                     "mod_wsgi (pid=%d): Starting %d threads in daemon "
+                     "process '%s'.", getpid(), daemon->group->threads,
+                     daemon->group->name);
+
+        for (i=0; i<daemon->group->threads; i++) {
+            ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+                         "mod_wsgi (pid=%d): Starting thread %d in daemon "
+                         "process '%s'.", getpid(), i+1, daemon->group->name);
+
+            threads[i].process = daemon;
+            threads[i].running = 0;
+
+            rv = apr_thread_create(&threads[i].thread, thread_attr,
+                                   wsgi_daemon_thread, &threads[i], p);
+
+            if (rv != APR_SUCCESS) {
+                /* Report the same 1-based thread number as at startup. */
+                ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+                             "mod_wsgi (pid=%d): Couldn't create worker "
+                             "thread %d in daemon process '%s'.", getpid(),
+                             i+1, daemon->group->name);
+
+                /*
+                 * Try to force an exit of the process if fail
+                 * to create the worker threads.
+                 */
+
+                kill(getpid(), SIGTERM);
+                sleep(5);
+            }
+        }
+
+        /* Block until we get a process shutdown signal. */
+
+        apr_signal_thread(wsgi_check_signal);
+
+        ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+                     "mod_wsgi (pid=%d): Shutdown requested '%s'.",
+                     getpid(), daemon->group->name);
+
+        /*
+         * Attempt a graceful shutdown by waiting for any
+         * threads which were processing a request at the time
+         * of shutdown. In some respects this is a bit pointless
+         * as even though we allow the requests to be completed,
+         * the Apache child process which proxied the request
+         * through to this daemon process could get killed off
+         * before the daemon process and so the response gets
+         * cut off or lost.
+         */
+
+        for (i=0; i<daemon->group->threads; i++) {
+            if (threads[i].thread && threads[i].running) {
+                rv = apr_thread_join(&thread_rv, threads[i].thread);
+                if (rv != APR_SUCCESS) {
+                    /* Report the same 1-based thread number as at startup. */
+                    ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server,
+                                 "mod_wsgi (pid=%d): Couldn't join with "
+                                 "worker thread %d in daemon process '%s'.",
+                                 getpid(), i+1, daemon->group->name);
+                }
+            }
+        }
+    }
+}
+
+/*
+ * Fork a single daemon process for the given process group entry. In
+ * the child this drops privileges, reinitialises the accept mutex,
+ * closes inherited Apache listener sockets, installs signal handling,
+ * sets up Python and then runs wsgi_daemon_main(); the child never
+ * returns and exits when shutdown. In the parent the child is
+ * registered for other-child maintenance so it can be restarted if
+ * it dies. Returns OK, or DECLINED if the fork fails.
+ */
+static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
+{
+    apr_status_t status;
+
+    ap_listen_rec *lr;
+
+    /*
+     * NOTE(review): APR error statuses are positive values, so the
+     * '< 0' test below may never trigger on a fork failure -- confirm
+     * against the apr_proc_fork() documentation for the APR version
+     * in use.
+     */
+
+    if ((status = apr_proc_fork(&daemon->process, p)) < 0) {
+        ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+                     "mod_wsgi: Couldn't spawn process '%s'.",
+                     daemon->group->name);
+        return DECLINED;
+    }
+    else if (status == APR_INCHILD) {
+        if (!geteuid()) {
+            ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+                         "mod_wsgi (pid=%d): Starting process '%s' with "
+                         "uid=%ld, gid=%u and threads=%d.", getpid(),
+                         daemon->group->name, (long)daemon->group->uid,
+                         (unsigned)daemon->group->gid, daemon->group->threads);
+        }
+        else {
+            ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+                         "mod_wsgi (pid=%d): Starting process '%s' with "
+                         "threads=%d.", getpid(), daemon->group->name,
+                         daemon->group->threads);
+        }
+
+#ifdef HAVE_BINDPROCESSOR
+        /*
+         * By default, AIX binds to a single processor. This
+         * bit unbinds children which will then bind to another
+         * CPU.
+         */
+
+        status = bindprocessor(BINDPROCESS, (int)getpid(),
+                               PROCESSOR_CLASS_ANY);
+        if (status != OK) {
+            ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server,
+                         "mod_wsgi (pid=%d): Failed to unbind processor.",
+                         getpid());
+        }
+#endif
+
+        /* Setup daemon process user/group/umask etc. */
+
+        wsgi_setup_access(daemon);
+
+        /* Reinitialise accept mutex in daemon process. */
+
+        if (daemon->group->mutex) {
+            status = apr_proc_mutex_child_init(&daemon->group->mutex,
+                                               daemon->group->mutex_path, p);
+
+            if (status != APR_SUCCESS) {
+                /* Log the actual failure status, not zero. */
+                ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(status), wsgi_server,
+                             "mod_wsgi (pid=%d): Couldn't initialise accept "
+                             "mutex in daemon process '%s'.",
+                             getpid(), daemon->group->mutex_path);
+
+                /* Don't die immediately to avoid a fork bomb. */
+
+                sleep(20);
+
+                exit(-1);
+            }
+        }
+
+        /*
+         * Create a lookup table of listener socket address
+         * details so can use it later in daemon when trying
+         * to map request to correct virtual host server.
+         */
+
+        wsgi_daemon_listeners = apr_hash_make(p);
+
+        for (lr = ap_listeners; lr; lr = lr->next) {
+            char *key;
+            char *host;
+            apr_port_t port;
+
+            host = lr->bind_addr->hostname;
+            port = lr->bind_addr->port;
+
+            if (!host)
+                host = "";
+
+            key = apr_psprintf(p, "%s|%d", host, port);
+
+            apr_hash_set(wsgi_daemon_listeners, key, APR_HASH_KEY_STRING,
+                         lr->bind_addr);
+        }
+
+        /*
+         * Close child copy of the listening sockets for the
+         * Apache parent process so we don't interfere with
+         * the parent process.
+         */
+
+        ap_close_listeners();
+
+        /*
+         * Register signal handler to receive shutdown signal
+         * from Apache parent process.
+         */
+
+        wsgi_daemon_shutdown = 0;
+
+        apr_signal(SIGCHLD, SIG_IGN);
+
+        if (daemon->group->threads == 1) {
+            apr_signal(SIGINT, wsgi_signal_handler);
+            apr_signal(SIGTERM, wsgi_signal_handler);
+        }
+
+        /*
+         * Flag whether multiple daemon processes or denoted
+         * that requests could be spread across multiple daemon
+         * process groups.
+         */
+
+        wsgi_multiprocess = daemon->group->multiprocess;
+        wsgi_multithread = daemon->group->threads != 1;
+
+        /*
+         * Create a pool for the child daemon process so
+         * we can trigger various events off it at shutdown.
+         */
+
+        apr_pool_create(&wsgi_daemon_pool, p);
+
+        /*
+         * If mod_python is also being loaded and thus it was
+         * responsible for initialising Python it can leave in
+         * place an active thread state. Under normal conditions
+         * this would be eliminated in Apache child process by
+         * the time that mod_wsgi got to do its own child
+         * initialisation but in daemon process we skip the
+         * mod_python child initialisation so the active thread
+         * state still exists. Thus need to do a bit of a fiddle
+         * to ensure there is no active thread state.
+         */
+
+        if (!wsgi_python_initialized) {
+            PyGILState_STATE state;
+
+            PyEval_AcquireLock();
+
+            state = PyGILState_Ensure();
+            PyGILState_Release(state);
+
+            if (state == PyGILState_LOCKED)
+                PyThreadState_Swap(NULL);
+
+            PyEval_ReleaseLock();
+        }
+
+        /*
+         * Setup Python in the child daemon process. Note that
+         * we ensure that we are now marked as the original
+         * initialiser of the Python interpreter even though
+         * mod_python might have done it, as we will be the one
+         * to cleanup the child daemon process and not
+         * mod_python. We also need to perform the special
+         * Python setup which has to be done after a fork.
+         */
+
+        wsgi_python_initialized = 1;
+        wsgi_python_child_init(wsgi_daemon_pool);
+
+        /*
+         * Update reference to server object in case daemon
+         * process is actually associated with a virtual host.
+         * This way all logging actually goes into the virtual
+         * hosts log file.
+         */
+
+        wsgi_server = daemon->group->server;
+
+        /* Retain a reference to daemon process details. */
+
+        wsgi_daemon_process = daemon;
+
+        /* Create socket wrapper for listener file descriptor. */
+
+        apr_os_sock_put(&daemon->listener, &daemon->group->listener_fd, p);
+
+        /* Run the main routine for the daemon process. */
+
+        wsgi_daemon_main(p, daemon);
+
+        /*
+         * Destroy the pool for the daemon process. This will
+         * have the side effect of also destroying Python.
+         */
+
+        ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+                     "mod_wsgi (pid=%d): Stopping process '%s'.", getpid(),
+                     daemon->group->name);
+
+        apr_pool_destroy(wsgi_daemon_pool);
+
+        /* Exit the daemon process when being shutdown. */
+
+        exit(-1);
+    }
+
+    apr_pool_note_subprocess(p, &daemon->process, APR_KILL_AFTER_TIMEOUT);
+    apr_proc_other_child_register(&daemon->process, wsgi_manage_process,
+                                  daemon, NULL, p);
+
+    return OK;
+}
+
+/*
+ * Create the listener socket, accept mutex and the configured number
+ * of daemon processes for every configured WSGI process group.
+ * Called in the Apache parent process. Returns OK, or DECLINED on
+ * any failure to set up sockets, mutexes or processes.
+ */
+static int wsgi_start_daemons(apr_pool_t *p)
+{
+    WSGIProcessGroup *entries = NULL;
+    WSGIProcessGroup *entry = NULL;
+    WSGIDaemonProcess *process = NULL;
+
+    int i, j;
+
+    /* Do we need to create any daemon processes. */
+
+    if (!wsgi_daemon_list)
+        return OK;
+
+    /*
+     * Cache references to root server and pool as will need
+     * to access these when restarting daemon process when
+     * they die.
+     */
+
+    wsgi_parent_pool = p;
+
+    /*
+     * Startup in turn the required number of daemon processes
+     * for each of the named process groups.
+     */
+
+    wsgi_daemon_index = apr_hash_make(p);
+
+    entries = (WSGIProcessGroup *)wsgi_daemon_list->elts;
+
+    for (i = 0; i < wsgi_daemon_list->nelts; ++i) {
+        int status;
+
+        entry = &entries[i];
+
+        /*
+         * Calculate path for socket to accept requests on and
+         * create the socket.
+         */
+
+        entry->socket = apr_psprintf(p, "%s.%d.%d.sock",
+                                     wsgi_server_config->socket_prefix,
+                                     ap_my_generation, entry->id);
+
+        apr_hash_set(wsgi_daemon_index, entry->name, APR_HASH_KEY_STRING,
+                     entry);
+
+        entry->listener_fd = wsgi_setup_socket(entry);
+
+        if (entry->listener_fd == -1)
+            return DECLINED;
+
+        /*
+         * If there is more than one daemon process in the group
+         * then need to create an accept mutex for the daemon
+         * processes to use so they don't interfere with each
+         * other.
+         */
+
+        if (entry->processes > 1) {
+            entry->mutex_path = apr_psprintf(p, "%s.%d.%d.lock",
+                                             wsgi_server_config->socket_prefix,
+                                             ap_my_generation, entry->id);
+
+            status = apr_proc_mutex_create(&entry->mutex, entry->mutex_path,
+                                           wsgi_server_config->lock_mechanism,
+                                           p);
+
+            if (status != APR_SUCCESS) {
+                ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno), wsgi_server,
+                             "mod_wsgi (pid=%d): Couldn't create accept "
+                             "lock '%s' (%d).", getpid(), entry->mutex_path,
+                             wsgi_server_config->lock_mechanism);
+                return DECLINED;
+            }
+
+            /*
+             * Depending on the locking mechanism being used
+             * need to change the permissions of the lock. Can't
+             * use unixd_set_proc_mutex_perms() as it uses the
+             * default Apache child process uid/gid where the
+             * daemon process uid/gid can be different.
+             */
+
+            if (!geteuid()) {
+#if APR_HAS_SYSVSEM_SERIALIZE
+                if (!strcmp(apr_proc_mutex_name(entry->mutex), "sysvsem")) {
+                    apr_os_proc_mutex_t ospmutex;
+#if !APR_HAVE_UNION_SEMUN
+                    /* Platform headers don't define it; declare locally. */
+                    union semun {
+                        long val;
+                        struct semid_ds *buf;
+                        unsigned short *array;
+                    };
+#endif
+                    union semun ick;
+                    struct semid_ds buf;
+
+                    /* Hand the semaphore to the daemon's own uid/gid. */
+
+                    apr_os_proc_mutex_get(&ospmutex, entry->mutex);
+                    buf.sem_perm.uid = entry->uid;
+                    buf.sem_perm.gid = entry->gid;
+                    buf.sem_perm.mode = 0600;
+                    ick.buf = &buf;
+                    if (semctl(ospmutex.crossproc, 0, IPC_SET, ick) < 0) {
+                        ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno),
+                                     wsgi_server, "mod_wsgi (pid=%d): "
+                                     "Couldn't set permissions on accept "
+                                     "mutex '%s' (sysvsem).", getpid(),
+                                     entry->mutex_path);
+                        return DECLINED;
+                    }
+                }
+#endif
+#if APR_HAS_FLOCK_SERIALIZE
+                if (!strcmp(apr_proc_mutex_name(entry->mutex), "flock")) {
+                    if (chown(entry->mutex_path, entry->uid, -1) < 0) {
+                        ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno),
+                                     wsgi_server, "mod_wsgi (pid=%d): "
+                                     "Couldn't set permissions on accept "
+                                     "mutex '%s' (flock).", getpid(),
+                                     entry->mutex_path);
+                        return DECLINED;
+                    }
+                }
+#endif
+            }
+        }
+
+        /* Create the actual required daemon processes. */
+
+        for (j = 1; j <= entry->processes; j++) {
+            process = (WSGIDaemonProcess *)apr_pcalloc(p, sizeof(
+                                                       WSGIDaemonProcess));
+
+            process->group = entry;
+            process->instance = j;
+
+            status = wsgi_start_process(p, process);
+
+            if (status != OK)
+                return status;
+        }
+    }
+
+    return OK;
+}
+
+/*
+ * Pool cleanup callback which closes the file descriptor of a
+ * WSGIDaemonSocket when the request pool is destroyed.
+ */
+static apr_status_t wsgi_close_socket(void *data)
+{
+    WSGIDaemonSocket *daemon = (WSGIDaemonSocket *)data;
+
+    return close(daemon->fd);
+}
+
+/*
+ * Open a connection from an Apache child process to the UNIX domain
+ * socket of the target daemon process group, retrying on connection
+ * refused with an exponential back-off that starts at 100 msec and
+ * is capped at 2 seconds. On success the descriptor is stored in
+ * daemon->fd with a pool cleanup registered to close it. Returns OK,
+ * HTTP_INTERNAL_SERVER_ERROR or HTTP_SERVICE_UNAVAILABLE.
+ */
+static int wsgi_connect_daemon(request_rec *r, WSGIDaemonSocket *daemon)
+{
+    struct sockaddr_un addr;
+
+    int retries = 0;
+
+    /*
+     * Note that apr_time_from_sec(0.1) cannot be used here: the
+     * macro casts its argument to the integral apr_time_t before
+     * scaling, truncating 0.1 to zero and so disabling both the
+     * sleep and the back-off. Build the 100 msec interval from
+     * seconds and microseconds instead.
+     */
+
+    apr_interval_time_t timer = apr_time_make(0, 100000);
+
+    memset(&addr, 0, sizeof(addr));
+    addr.sun_family = AF_UNIX;
+    apr_cpystrn(addr.sun_path, daemon->socket, sizeof addr.sun_path);
+
+    while (1) {
+        retries++;
+
+        if ((daemon->fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
+            ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r,
+                          "mod_wsgi (pid=%d): Unable to create socket to "
+                          "connect to WSGI daemon process.", getpid());
+
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
+
+        if (connect(daemon->fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+            if (errno == ECONNREFUSED && retries < WSGI_CONNECT_ATTEMPTS) {
+                ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r,
+                              "mod_wsgi (pid=%d): Connection attempt #%d to "
+                              "WSGI daemon process '%s' on '%s' failed, "
+                              "sleeping before retrying again.", getpid(),
+                              retries, daemon->name, daemon->socket);
+
+                close(daemon->fd);
+
+                /* Increase wait time up to maximum of 2 seconds. */
+
+                apr_sleep(timer);
+                if (timer < apr_time_from_sec(2))
+                    timer *= 2;
+            }
+            else {
+                ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r,
+                              "mod_wsgi (pid=%d): Unable to connect to "
+                              "WSGI daemon process '%s' on '%s' after "
+                              "multiple attempts.", getpid(), daemon->name,
+                              daemon->socket);
+
+                close(daemon->fd);
+
+                return HTTP_SERVICE_UNAVAILABLE;
+            }
+        }
+        else {
+            /* Make sure the socket is closed when the request ends. */
+
+            apr_pool_cleanup_register(r->pool, daemon, wsgi_close_socket,
+                                      apr_pool_cleanup_null);
+
+            break;
+        }
+    }
+
+    return OK;
+}
+
+/*
+ * Write buf_size bytes from buf to the descriptor fd. Retries on
+ * EINTR and continues after a short write until the whole buffer
+ * has been sent; previously a short write was silently treated as
+ * complete, truncating the record on the wire. Returns APR_SUCCESS,
+ * or the errno value on failure.
+ */
+static apr_status_t wsgi_socket_send(int fd, const void *buf, size_t buf_size)
+{
+    const char *data = buf;
+    size_t remaining = buf_size;
+
+    while (remaining) {
+        ssize_t rc;
+
+        do {
+            rc = write(fd, data, remaining);
+        } while (rc < 0 && errno == EINTR);
+
+        if (rc < 0)
+            return errno;
+
+        data += rc;
+        remaining -= rc;
+    }
+
+    return APR_SUCCESS;
+}
+
+/*
+ * Send a NUL terminated string over the socket as a length prefixed
+ * record: the length as a raw native int, followed by that many
+ * bytes of the string (terminator excluded).
+ */
+static apr_status_t wsgi_send_string(int fd, const char *s)
+{
+    int len = strlen(s);
+    apr_status_t rv = wsgi_socket_send(fd, &len, sizeof(len));
+
+    if (rv != APR_SUCCESS)
+        return rv;
+
+    return wsgi_socket_send(fd, s, len);
+}
+
+/*
+ * Send a NULL terminated array of strings over the socket as a
+ * count (raw native int) followed by one length prefixed record per
+ * string.
+ */
+static apr_status_t wsgi_send_strings(int fd, const char **s)
+{
+    apr_status_t rv;
+    int count = 0;
+    int idx;
+
+    while (s[count])
+        count++;
+
+    rv = wsgi_socket_send(fd, &count, sizeof(count));
+
+    if (rv != APR_SUCCESS)
+        return rv;
+
+    for (idx = 0; idx < count; idx++) {
+        rv = wsgi_send_string(fd, s[idx]);
+
+        if (rv != APR_SUCCESS)
+            return rv;
+    }
+
+    return APR_SUCCESS;
+}
+
+/*
+ * Serialise the CGI style environment from r->subprocess_env and
+ * send it to the daemon process as a counted sequence of alternating
+ * key/value strings. The config argument is currently unused but is
+ * retained for interface stability. The local previously named
+ * 'environ' is renamed to avoid conflicting with the POSIX global
+ * 'environ'.
+ */
+static apr_status_t wsgi_send_request(request_rec *r,
+                                      WSGIRequestConfig *config,
+                                      WSGIDaemonSocket *daemon)
+{
+    apr_status_t rv;
+
+    char **vars;
+    const apr_array_header_t *env_arr;
+    const apr_table_entry_t *elts;
+    int i, j;
+
+    /* Send subprocess environment from request object. */
+
+    env_arr = apr_table_elts(r->subprocess_env);
+    elts = (const apr_table_entry_t *)env_arr->elts;
+
+    /*
+     * Two slots per table entry (key and value) plus one for the
+     * terminating NULL pointer.
+     */
+
+    vars = (char **)apr_palloc(r->pool,
+                               ((2*env_arr->nelts)+1)*sizeof(char *));
+
+    for (i=0, j=0; i<env_arr->nelts; ++i) {
+        if (!elts[i].key)
+            continue;
+
+        vars[j++] = elts[i].key;
+        vars[j++] = elts[i].val ? elts[i].val : "";
+    }
+
+    vars[j] = NULL;
+
+    rv = wsgi_send_strings(daemon->fd, (const char **)vars);
+
+    if (rv != APR_SUCCESS)
+        return rv;
+
+    return APR_SUCCESS;
+}
+
+/*
+ * Read and throw away the content of the brigade up to an EOS
+ * bucket or the first read failure, so remaining response data from
+ * a script is consumed without being forwarded.
+ */
+static void wsgi_discard_script_output(apr_bucket_brigade *bb)
+{
+    apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
+
+    while (bucket != APR_BRIGADE_SENTINEL(bb)) {
+        const char *data;
+        apr_size_t length;
+
+        if (APR_BUCKET_IS_EOS(bucket))
+            break;
+
+        if (apr_bucket_read(bucket, &data, &length,
+                            APR_BLOCK_READ) != APR_SUCCESS) {
+            break;
+        }
+
+        bucket = APR_BUCKET_NEXT(bucket);
+    }
+}
+
+/*
+ * Handler path which proxies a request from an Apache child process
+ * to the matching WSGI daemon process: validates that the configured
+ * process group exists and is accessible from this virtual host,
+ * connects to the daemon's UNIX socket, sends the request
+ * environment, streams any request body across, then reads back and
+ * forwards the CGI style response. Returns OK, DECLINED (request is
+ * for the main Apache processes) or an HTTP error status.
+ */
+static int wsgi_execute_remote(request_rec *r)
+{
+    WSGIRequestConfig *config = NULL;
+    WSGIDaemonSocket *daemon = NULL;
+    WSGIProcessGroup *group = NULL;
+
+    char const *hash = NULL;
+
+    int status;
+    apr_status_t rv;
+
+    int seen_eos;
+    int child_stopped_reading;
+    apr_file_t *tempsock;
+    apr_bucket_brigade *bb;
+    apr_bucket *b;
+
+    /* Grab request configuration. */
+
+    config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
+                                                       &wsgi_module);
+
+    /*
+     * Only allow the process group to match against a restricted
+     * set of processes if such a restricted set has been defined.
+     */
+
+    if (config->restrict_process) {
+        if (!apr_table_get(config->restrict_process,
+                           config->process_group)) {
+            wsgi_log_script_error(r, apr_psprintf(r->pool, "Daemon "
+                                  "process called '%s' cannot be "
+                                  "accessed by this WSGI application",
+                                  config->process_group), r->filename);
+
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
+    }
+
+    /*
+     * Do not process request as remote if actually targeted at
+     * the main Apache processes.
+     */
+
+    if (!*config->process_group)
+        return DECLINED;
+
+    /* Grab details of matching process group. */
+
+    if (!wsgi_daemon_index) {
+        wsgi_log_script_error(r, apr_psprintf(r->pool, "No WSGI daemon "
+                              "process called '%s' has been configured",
+                              config->process_group), r->filename);
+
+        return HTTP_INTERNAL_SERVER_ERROR;
+    }
+
+    group = (WSGIProcessGroup *)apr_hash_get(wsgi_daemon_index,
+                                             config->process_group,
+                                             APR_HASH_KEY_STRING);
+
+    if (!group) {
+        wsgi_log_script_error(r, apr_psprintf(r->pool, "No WSGI daemon "
+                              "process called '%s' has been configured",
+                              config->process_group), r->filename);
+
+        return HTTP_INTERNAL_SERVER_ERROR;
+    }
+
+    /*
+     * Only allow the process group to match against a daemon
+     * process defined within a virtual host with the same
+     * server name or a daemon process defined at global server
+     * scope.
+     */
+
+    if (group->server != r->server && group->server != wsgi_server) {
+        if (strcmp(group->server->server_hostname,
+                   r->server->server_hostname) != 0) {
+            wsgi_log_script_error(r, apr_psprintf(r->pool, "Daemon "
+                                  "process called '%s' cannot be "
+                                  "accessed by this WSGI application",
+                                  config->process_group), r->filename);
+
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
+    }
+
+    /*
+     * Add magic marker into request environment so that daemon
+     * process can verify that request is from a sender that can
+     * be trusted.
+     */
+
+    hash = apr_psprintf(r->pool, "%ld|%s|%s", group->random,
+                        group->socket, r->filename);
+    hash = ap_md5(r->pool, (const unsigned char *)hash);
+
+    apr_table_setn(r->subprocess_env, "mod_wsgi.magic", hash);
+
+    /* Create connection to the daemon process. */
+
+    daemon = (WSGIDaemonSocket *)apr_pcalloc(r->pool,
+                                             sizeof(WSGIDaemonSocket));
+
+    daemon->name = config->process_group;
+    daemon->socket = group->socket;
+
+    if ((status = wsgi_connect_daemon(r, daemon)) != OK)
+        return status;
+
+    /* Send request details and subprocess environment. */
+
+    if ((rv = wsgi_send_request(r, config, daemon)) != APR_SUCCESS) {
+        ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r,
+                      "mod_wsgi (pid=%d): Unable to send request details "
+                      "to WSGI daemon process '%s' on '%s'.", getpid(),
+                      daemon->name, daemon->socket);
+
+        return HTTP_INTERNAL_SERVER_ERROR;
+    }
+
+    /*
+     * Wrap the socket in an APR file object so that the socket can
+     * be more easily written to and so that a pipe bucket can be
+     * created later for reading from it. Note that the file object
+     * is initialised such that it will close the socket when no
+     * longer required, so kill off the cleanup registration done at
+     * a higher level to close the socket.
+     */
+
+    apr_os_pipe_put_ex(&tempsock, &daemon->fd, 1, r->pool);
+    apr_pool_cleanup_kill(r->pool, daemon, wsgi_close_socket);
+
+    /* Transfer any request content which was provided. */
+
+    seen_eos = 0;
+    child_stopped_reading = 0;
+
+    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+
+    do {
+        apr_bucket *bucket;
+
+        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+                            APR_BLOCK_READ, HUGE_STRING_LEN);
+
+        if (rv != APR_SUCCESS) {
+            ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r,
+                          "mod_wsgi (pid=%d): Unable to get bucket brigade "
+                          "for request.", getpid());
+            return HTTP_INTERNAL_SERVER_ERROR;
+        }
+
+        for (bucket = APR_BRIGADE_FIRST(bb);
+             bucket != APR_BRIGADE_SENTINEL(bb);
+             bucket = APR_BUCKET_NEXT(bucket))
+        {
+            const char *data;
+            apr_size_t len;
+
+            if (APR_BUCKET_IS_EOS(bucket)) {
+                seen_eos = 1;
+                break;
+            }
+
+            /* We can't do much with this. */
+            if (APR_BUCKET_IS_FLUSH(bucket)) {
+                continue;
+            }
+
+            /* If the child stopped, we still must read to EOS. */
+            if (child_stopped_reading) {
+                continue;
+            }
+
+            /* Read block. */
+            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+
+            /*
+             * Keep writing data to the child until done or too
+             * much time elapses with no progress or an error
+             * occurs. (XXX Does a timeout actually occur?)
+             */
+            rv = apr_file_write_full(tempsock, data, len, NULL);
+
+            if (rv != APR_SUCCESS) {
+                /* Daemon stopped reading, discard remainder. */
+                child_stopped_reading = 1;
+            }
+        }
+        apr_brigade_cleanup(bb);
+    }
+    while (!seen_eos);
+
+    /*
+     * Close socket for writing so that daemon detects end of
+     * request content.
+     */
+
+    shutdown(daemon->fd, 1);
+
+    /* Setup bucket brigade for reading response from daemon. */
+
+    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+    b = apr_bucket_pipe_create(tempsock, r->connection->bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(bb, b);
+    b = apr_bucket_eos_create(r->connection->bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(bb, b);
+
+    /* Scan the CGI script like headers from daemon. */
+
+    if ((status = ap_scan_script_header_err_brigade(r, bb, NULL))) {
+        return HTTP_INTERNAL_SERVER_ERROR;
+    }
+
+    /*
+     * Look for special case of status being 0 and
+     * translate it into a 500 error so that error
+     * document processing will occur for those cases
+     * where WSGI application wouldn't have supplied
+     * their own error document.
+     */
+
+    if (r->status == 0)
+        return HTTP_INTERNAL_SERVER_ERROR;
+
+    /* Transfer any response content. */
+
+    ap_pass_brigade(r->output_filters, bb);
+
+    return OK;
+}
+
+/*
+ * Read exactly buf_size bytes from the descriptor fd into vbuf,
+ * retrying on EINTR/EAGAIN and continuing over short reads. Returns
+ * APR_SUCCESS, the errno value on failure, or ECONNRESET if the
+ * peer closed the connection before all data arrived.
+ */
+static apr_status_t wsgi_socket_read(int fd, void *vbuf, size_t buf_size)
+{
+    char *cursor = vbuf;
+    size_t total = 0;
+
+    while (total < buf_size) {
+        int rc;
+
+        do {
+            rc = read(fd, cursor + total, buf_size - total);
+        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+        if (rc < 0)
+            return errno;
+
+        if (rc == 0) {
+            /* Unexpected end of stream from the peer. */
+            return ECONNRESET;
+        }
+
+        total += rc;
+    }
+
+    return APR_SUCCESS;
+}
+
+/*
+ * Read a length prefixed string record from the socket into a NUL
+ * terminated buffer allocated from the pool. A zero length yields
+ * an empty string.
+ */
+static apr_status_t wsgi_read_string(int fd, char **s, apr_pool_t *p)
+{
+    int len;
+    apr_status_t rv = wsgi_socket_read(fd, &len, sizeof(len));
+
+    if (rv != APR_SUCCESS)
+        return rv;
+
+    /* apr_pcalloc() zero fills, so the terminator is already there. */
+
+    *s = apr_pcalloc(p, len+1);
+
+    return len ? wsgi_socket_read(fd, *s, len) : APR_SUCCESS;
+}
+
+/*
+ * Read a counted sequence of length prefixed strings from the
+ * socket into a NULL terminated array allocated from the pool.
+ */
+static apr_status_t wsgi_read_strings(int fd, char ***s, apr_pool_t *p)
+{
+    apr_status_t rv;
+    int count;
+    int idx;
+
+    rv = wsgi_socket_read(fd, &count, sizeof(count));
+
+    if (rv != APR_SUCCESS)
+        return rv;
+
+    /* One extra slot, zero filled, acts as the NULL terminator. */
+
+    *s = apr_pcalloc(p, (count+1)*sizeof(**s));
+
+    for (idx = 0; idx < count; idx++) {
+        rv = wsgi_read_string(fd, &(*s)[idx], p);
+
+        if (rv != APR_SUCCESS)
+            return rv;
+    }
+
+    return APR_SUCCESS;
+}
+
+static apr_status_t wsgi_read_request(int sockfd, request_rec *r)
+{
+ int rv;
+
+ pid_t ppid;
+ char **environ;
+
+ /* Read subprocess environment from request object. */
+
+ rv = wsgi_read_strings(sockfd, &environ, r->pool);
+
+ if (rv != APR_SUCCESS)
+ return rv;
+
+ while (*environ) {
+ char *key = *environ++;
+
+ apr_table_setn(r->subprocess_env, key, *environ++);
+ }
+
+ return APR_SUCCESS;
+}
+
+static ap_filter_rec_t *wsgi_header_filter_handle;
+
/*
 * Output filter installed in the daemon process to emit the response
 * status line and headers in CGI script style: a "Status:" line, the
 * merged response header lines, then a terminating blank line, all
 * ahead of the response body.  The Apache child process on the other
 * end of the socket parses these back out of the stream using
 * ap_scan_script_header_err_brigade().  The filter removes itself
 * after the headers are written so it runs at most once per request.
 */
apr_status_t wsgi_header_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    request_rec *r = f->r;

    struct iovec vec1[4];
    apr_bucket_brigade *b2;
    char crlf[] = CRLF;
    apr_size_t buflen;

    const apr_array_header_t *elts;
    const apr_table_entry_t *t_elt;
    const apr_table_entry_t *t_end;
    struct iovec *vec2;
    struct iovec *vec2_next;

    /* Output status line. */

    vec1[0].iov_base = (void *)"Status:";
    vec1[0].iov_len = strlen("Status:");
    vec1[1].iov_base = (void *)" ";
    vec1[1].iov_len = sizeof(" ") - 1;
    vec1[2].iov_base = (void *)(r->status_line);
    vec1[2].iov_len = strlen(r->status_line);
    vec1[3].iov_base = (void *)CRLF;
    vec1[3].iov_len = sizeof(CRLF) - 1;

    b2 = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    apr_brigade_writev(b2, NULL, NULL, vec1, 4);

    /* Merge response header tables together. */

    if (!apr_is_empty_table(r->err_headers_out)) {
        r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
                                           r->headers_out);
    }

    /* Override the content type for response. */

    if (r->content_type)
        apr_table_setn(r->headers_out, "Content-Type", r->content_type);

    /* Format the response headers for output: four iovec slots per
     * header, for "key", ": ", "value" and CRLF. */

    elts = apr_table_elts(r->headers_out);
    if (elts->nelts != 0) {
        t_elt = (const apr_table_entry_t *)(elts->elts);
        t_end = t_elt + elts->nelts;
        vec2 = (struct iovec *)apr_palloc(r->pool, 4 * elts->nelts *
                                          sizeof(struct iovec));
        vec2_next = vec2;

        do {
            vec2_next->iov_base = (void*)(t_elt->key);
            vec2_next->iov_len = strlen(t_elt->key);
            vec2_next++;
            vec2_next->iov_base = ": ";
            vec2_next->iov_len = sizeof(": ") - 1;
            vec2_next++;
            vec2_next->iov_base = (void*)(t_elt->val);
            vec2_next->iov_len = strlen(t_elt->val);
            vec2_next++;
            vec2_next->iov_base = CRLF;
            vec2_next->iov_len = sizeof(CRLF) - 1;
            vec2_next++;
            t_elt++;
        } while (t_elt < t_end);

        apr_brigade_writev(b2, NULL, NULL, vec2, vec2_next - vec2);
    }

    /* Format terminating blank line for response headers. */

    buflen = strlen(crlf);
    apr_brigade_write(b2, NULL, NULL, crlf, buflen);

    /* Output the response headers. */

    ap_pass_brigade(f->next, b2);

    /* Remove ourselves from filter chain so we aren't called again. */

    ap_remove_output_filter(f);

    /* Output the partial response content. */

    return ap_pass_brigade(f->next, b);
}
+
/*
 * Connection hook run (at APR_HOOK_REALLY_FIRST) in the daemon process
 * for each connection accepted on the daemon listener socket.  It
 * reconstructs a faked request_rec from the environment marshalled
 * over the socket by the Apache child process, validates the origin
 * of the request via an MD5 magic hash, remaps the request onto the
 * correct virtual host so logging goes to the right error log, and
 * then executes the target WSGI application.
 */
static int wsgi_hook_daemon_handler(conn_rec *c)
{
    apr_socket_t *csd;
    int sockfd = -1;
    request_rec *r;
    apr_pool_t *p;
    apr_status_t rv;

    char *key;
    apr_sockaddr_t *addr;

    char const *magic;
    char const *hash;

    WSGIRequestConfig *config;

    apr_bucket *e;
    apr_bucket_brigade *bb;

    /* Don't do anything if not in daemon process. */

    if (!wsgi_daemon_pool)
        return DECLINED;

    /* Create and populate our own request object. */

    apr_pool_create(&p, c->pool);
    r = apr_pcalloc(p, sizeof(request_rec));

    r->pool = p;
    r->connection = c;
    r->server = c->base_server;

    r->user = NULL;
    r->ap_auth_type = NULL;

    r->allowed_methods = ap_make_method_list(p, 2);

    r->headers_in = apr_table_make(r->pool, 25);
    r->subprocess_env = apr_table_make(r->pool, 25);
    r->headers_out = apr_table_make(r->pool, 12);
    r->err_headers_out = apr_table_make(r->pool, 5);
    r->notes = apr_table_make(r->pool, 5);

    r->request_config = ap_create_request_config(r->pool);

    /* Wire the request filters straight onto the connection's. */

    r->proto_output_filters = c->output_filters;
    r->output_filters = r->proto_output_filters;
    r->proto_input_filters = c->input_filters;
    r->input_filters = r->proto_input_filters;

    r->per_dir_config = r->server->lookup_defaults;

    r->sent_bodyct = 0;

    r->read_length = 0;
    r->read_body = REQUEST_NO_BODY;

    /* Default to an error status until the request proves valid. */

    r->status = HTTP_INTERNAL_SERVER_ERROR;
    r->the_request = NULL;

    r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;

    /*
     * Install our own output filter for writing back headers in
     * CGI script style.
     */

    ap_add_output_filter_handle(wsgi_header_filter_handle,
                                NULL, r, r->connection);

    /* Create and install the WSGI request config. */

    config = (WSGIRequestConfig *)apr_pcalloc(r->pool,
                                              sizeof(WSGIRequestConfig));
    ap_set_module_config(r->request_config, &wsgi_module, (void *)config);

    /*
     * Retrieve the socket stashed in the connection core config and
     * extract the raw OS file descriptor, which the request details
     * are read from directly.
     */

    csd = ap_get_module_config(c->conn_config, &core_module);
    apr_os_sock_get(&sockfd, csd);

    /* Read in the request details and setup request object. */

    if ((rv = wsgi_read_request(sockfd, r)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(rv), r,
                      "mod_wsgi (pid=%d): Unable to read WSGI request.",
                      getpid());

        apr_pool_destroy(p);

        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Set target of request and recalculate modification time. */

    r->filename = (char *)apr_table_get(r->subprocess_env, "SCRIPT_FILENAME");

    apr_stat(&r->finfo, r->filename, APR_FINFO_SIZE, r->pool);

    /* Check magic marker used to validate origin of request. */

    magic = apr_table_get(r->subprocess_env, "mod_wsgi.magic");

    if (!magic) {
        /* NOTE(review): 'rv' here is still APR_SUCCESS from the read
         * above, so the status passed to the log macro carries no
         * useful error detail -- confirm intended. */
        ap_log_rerror(APLOG_MARK, WSGI_LOG_ALERT(rv), r,
                      "mod_wsgi (pid=%d): Request origin could not be "
                      "validated.", getpid());

        apr_pool_destroy(p);

        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Recompute the expected hash from shared secrets and compare. */

    hash = apr_psprintf(r->pool, "%ld|%s|%s",
                        wsgi_daemon_process->group->random,
                        wsgi_daemon_process->group->socket, r->filename);
    hash = ap_md5(r->pool, (const unsigned char *)hash);

    if (strcmp(magic, hash) != 0) {
        ap_log_rerror(APLOG_MARK, WSGI_LOG_ALERT(rv), r,
                      "mod_wsgi (pid=%d): Request origin could not be "
                      "validated.", getpid());

        apr_pool_destroy(p);

        return HTTP_INTERNAL_SERVER_ERROR;
    }

    apr_table_unset(r->subprocess_env, "mod_wsgi.magic");

    /*
     * Trigger mapping of host information to server configuration
     * so that when logging errors they go to the correct error log
     * file for the host.
     */

    r->connection->remote_ip = (char *)apr_table_get(r->subprocess_env,
                                                     "REMOTE_ADDR");

    key = apr_psprintf(p, "%s|%s",
                       apr_table_get(r->subprocess_env,
                                     "mod_wsgi.listener_host"),
                       apr_table_get(r->subprocess_env,
                                     "mod_wsgi.listener_port"));

    addr = (apr_sockaddr_t *)apr_hash_get(wsgi_daemon_listeners,
                                          key, APR_HASH_KEY_STRING);

    if (addr)
        c->local_addr = addr;

    ap_update_vhost_given_ip(r->connection);

    r->server = c->base_server;

    if (apr_table_get(r->subprocess_env, "HTTP_HOST")) {
        apr_table_setn(r->headers_in, "Host",
                       apr_table_get(r->subprocess_env, "HTTP_HOST"));
    }

    ap_update_vhost_from_headers(r);

    /*
     * Set content length of any request content and add the
     * standard HTTP input filter so that standard input routines
     * for request content will work.
     */

    if (apr_table_get(r->subprocess_env, "CONTENT_LENGTH")) {
        apr_table_setn(r->headers_in, "Content-Length",
                       apr_table_get(r->subprocess_env, "CONTENT_LENGTH"));
    }

    ap_add_input_filter("HTTP_IN", NULL, r, r->connection);

    /* Set details of WSGI specific request config. */

    config->process_group = apr_table_get(r->subprocess_env,
                                          "mod_wsgi.process_group");
    config->application_group = apr_table_get(r->subprocess_env,
                                              "mod_wsgi.application_group");
    config->callable_object = apr_table_get(r->subprocess_env,
                                            "mod_wsgi.callable_object");

    config->script_reloading = atoi(apr_table_get(r->subprocess_env,
                                                  "mod_wsgi.script_reloading"));
    config->reload_mechanism = atoi(apr_table_get(r->subprocess_env,
                                                  "mod_wsgi.reload_mechanism"));
    config->output_buffering = atoi(apr_table_get(r->subprocess_env,
                                                  "mod_wsgi.output_buffering"));
    config->case_sensitivity = atoi(apr_table_get(r->subprocess_env,
                                                  "mod_wsgi.case_sensitivity"));

    /*
     * Define how input data is to be processed. This
     * was already done in the Apache child process and
     * so it shouldn't fail. More importantly, it sets
     * up request data tracking how much input has been
     * read or if more remains.
     */

    ap_setup_client_block(r, REQUEST_CHUNKED_ERROR);

    /*
     * Execute the actual target WSGI application. In
     * normal cases OK should always be returned. If
     * however an error occurs in importing or executing
     * the script or the Python code raises an exception
     * which is not caught and handled, then an internal
     * server error can be returned. As we don't want to
     * be triggering any error document handlers in the
     * daemon process we use a fake status line with 0
     * as the status value. This will be picked up in
     * the Apache child process which will translate it
     * back to a 500 error so that normal error document
     * processing occurs.
     */

    r->status = HTTP_OK;

    if (wsgi_execute_script(r) != OK) {
        r->status = HTTP_INTERNAL_SERVER_ERROR;
        r->status_line = "0 Internal Server Error";
    }

    /*
     * Ensure that request is finalised and any response
     * is flushed out. This will as a side effect read
     * any input data which wasn't consumed, thus
     * ensuring that the Apache child process isn't hung
     * waiting to send the request content and can
     * therefore process the response correctly.
     */

    ap_finalize_request_protocol(r);

    bb = apr_brigade_create(r->pool, c->bucket_alloc);
    e = apr_bucket_flush_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_HEAD(bb, e);
    ap_pass_brigade(r->connection->output_filters, bb);

    apr_pool_destroy(p);

    return OK;
}
+
+#endif
+
+/*
+ * Apache 2.X module initialisation functions.
+ */
+
+static int wsgi_hook_init(apr_pool_t *pconf, apr_pool_t *ptemp,
+ apr_pool_t *plog, server_rec *s)
+{
+ void *data = NULL;
+ const char *userdata_key = "wsgi_init";
+ char package[128];
+
+ int status = OK;
+
+ /*
+ * Init function gets called twice during startup, we only
+ * need to actually do anything on the second time it is
+ * called. This avoids unecessarily initialising and then
+ * destroying Python for no reason.
+ */
+
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (!data) {
+ apr_pool_userdata_set((const void *)1, userdata_key,
+ apr_pool_cleanup_null, s->process->pool);
+ return OK;
+ }
+
+ /* Setup module version information. */
+
+ sprintf(package, "mod_wsgi/%d.%d-TRUNK", MOD_WSGI_MAJORVERSION_NUMBER,
+ MOD_WSGI_MINORVERSION_NUMBER);
+
+ ap_add_version_component(pconf, package);
+
+ /* Retain reference to base server. */
+
+ wsgi_server = s;
+
+ /* Retain record of parent process ID. */
+
+ wsgi_parent_pid = getpid();
+
+ /* Determine whether multiprocess and/or multithread. */
+
+ ap_mpm_query(AP_MPMQ_IS_THREADED, &wsgi_multithread);
+ wsgi_multithread = (wsgi_multithread != AP_MPMQ_NOT_SUPPORTED);
+
+ ap_mpm_query(AP_MPMQ_IS_FORKED, &wsgi_multiprocess);
+ if (wsgi_multiprocess != AP_MPMQ_NOT_SUPPORTED) {
+ ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &wsgi_multiprocess);
+ wsgi_multiprocess = (wsgi_multiprocess != 1);
+ }
+
+ /* Retain reference to main server config. */
+
+ wsgi_server_config = ap_get_module_config(s->module_config, &wsgi_module);
+
+ /* Initialise Python if not already done. */
+
+ wsgi_python_init(pconf);
+
+ /* Startup separate named daemon processes. */
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+ status = wsgi_start_daemons(pconf);
+#endif
+
+ return status;
+}
+
/*
 * Child init hook.  Performs per child process initialisation of the
 * Python interpreter state.
 */
static void wsgi_hook_child_init(apr_pool_t *p, server_rec *s)
{
    wsgi_python_child_init(p);
}
+
/*
 * Register the module's hooks and output filter with Apache.  The
 * translate name hook is ordered after mod_alias but before
 * mod_userdir/mod_vhost_alias so WSGIScriptAlias interacts correctly
 * with those modules.
 */
static void wsgi_register_hooks(apr_pool_t *p)
{
    static const char * const prev[] = { "mod_alias.c", NULL };
    static const char * const next[]= { "mod_userdir.c",
                                        "mod_vhost_alias.c", NULL };

    /*
     * Perform initialisation last in the post config phase to
     * ensure that if mod_python is also being loaded that it
     * gets to perform interpreter initialisation in preference
     * to mod_wsgi doing it.
     */

    ap_hook_post_config(wsgi_hook_init, NULL, NULL, APR_HOOK_LAST);
    ap_hook_child_init(wsgi_hook_child_init, NULL, NULL, APR_HOOK_MIDDLE);

    ap_hook_translate_name(wsgi_hook_intercept, prev, next, APR_HOOK_MIDDLE);
    ap_hook_handler(wsgi_hook_handler, NULL, NULL, APR_HOOK_MIDDLE);

#if defined(MOD_WSGI_WITH_DAEMONS)
    /*
     * The daemon handler must run really first so that, within a
     * daemon process, it claims connections before any other
     * process_connection hook can see them.
     */

    ap_hook_process_connection(wsgi_hook_daemon_handler, NULL, NULL,
                               APR_HOOK_REALLY_FIRST);

    wsgi_header_filter_handle =
        ap_register_output_filter("WSGI_HEADER", wsgi_header_filter,
                                  NULL, AP_FTYPE_PROTOCOL);
#endif
}
+
/*
 * Table of configuration directives understood by the module, mapping
 * each directive name to its handler, allowed context and usage text.
 * Daemon mode directives are only compiled in when daemon support is
 * available.
 */
static const command_rec wsgi_commands[] =
{
    /* Script aliasing directives. */

    AP_INIT_TAKE2("WSGIScriptAlias", wsgi_add_script_alias, NULL,
        RSRC_CONF, "Map location to target WSGI script file."),
    AP_INIT_TAKE2("WSGIScriptAliasMatch", wsgi_add_script_alias, "*",
        RSRC_CONF, "Map location pattern to target WSGI script file."),

#if defined(MOD_WSGI_WITH_DAEMONS)
    /* Daemon process definition directives. */

    AP_INIT_RAW_ARGS("WSGIDaemonProcess", wsgi_add_daemon_process, NULL,
        RSRC_CONF, "Specify details of daemon processes to start."),
    AP_INIT_TAKE1("WSGISocketPrefix", wsgi_set_socket_prefix, NULL,
        RSRC_CONF, "Path prefix for the daemon process sockets."),
    AP_INIT_TAKE1("WSGIAcceptMutex", wsgi_set_accept_mutex, NULL,
        RSRC_CONF, "Set accept mutex type for daemon processes."),
#endif

    /* Python interpreter configuration directives. */

    AP_INIT_TAKE1("WSGIPythonOptimize", wsgi_set_python_optimize, NULL,
        RSRC_CONF, "Set level of Python compiler optimisations."),
#ifndef WIN32
    AP_INIT_TAKE1("WSGIPythonExecutable", wsgi_set_python_executable, NULL,
        RSRC_CONF, "Python executable absolute path name."),
    AP_INIT_TAKE1("WSGIPythonHome", wsgi_set_python_home, NULL,
        RSRC_CONF, "Python prefix/exec_prefix absolute path names."),
    AP_INIT_TAKE1("WSGIPythonPath", wsgi_set_python_path, NULL,
        RSRC_CONF, "Python module search path."),
#endif

    /* Restriction directives for the embedded environment. */

    AP_INIT_TAKE1("WSGIRestrictStdin", wsgi_set_restrict_stdin, NULL,
        RSRC_CONF, "Enable/Disable restrictions on use of STDIN."),
    AP_INIT_TAKE1("WSGIRestrictStdout", wsgi_set_restrict_stdout, NULL,
        RSRC_CONF, "Enable/Disable restrictions on use of STDOUT."),
    AP_INIT_TAKE1("WSGIRestrictSignal", wsgi_set_restrict_signal, NULL,
        RSRC_CONF, "Enable/Disable restrictions on use of signal()."),

#if defined(MOD_WSGI_WITH_DAEMONS)
    /* Daemon process selection directives. */

    AP_INIT_RAW_ARGS("WSGIRestrictProcess", wsgi_set_restrict_process, NULL,
        ACCESS_CONF|RSRC_CONF, "Limit selectable WSGI process groups."),
    AP_INIT_TAKE1("WSGIProcessGroup", wsgi_set_process_group, NULL,
        ACCESS_CONF|RSRC_CONF, "Name of the WSGI process group."),
#endif

    /* Per application behaviour directives. */

    AP_INIT_TAKE1("WSGIApplicationGroup", wsgi_set_application_group, NULL,
        ACCESS_CONF|RSRC_CONF, "Name of WSGI application group."),
    AP_INIT_TAKE1("WSGICallableObject", wsgi_set_callable_object, NULL,
        OR_FILEINFO, "Name of entry point in WSGI script file."),

    AP_INIT_TAKE1("WSGIPassAuthorization", wsgi_set_pass_authorization, NULL,
        ACCESS_CONF|RSRC_CONF, "Enable/Disable WSGI authorization."),
    AP_INIT_TAKE1("WSGIScriptReloading", wsgi_set_script_reloading, NULL,
        OR_FILEINFO, "Enable/Disable script reloading mechanism."),
    AP_INIT_TAKE1("WSGIReloadMechanism", wsgi_set_reload_mechanism, NULL,
        OR_FILEINFO, "Defines what is reloaded when a reload occurs."),
    AP_INIT_TAKE1("WSGIOutputBuffering", wsgi_set_output_buffering, NULL,
        OR_FILEINFO, "Enable/Disable buffering of response."),
    AP_INIT_TAKE1("WSGICaseSensitivity", wsgi_set_case_sensitivity, NULL,
        OR_FILEINFO, "Define whether file system is case sensitive."),

    { NULL }
};
+
/* Dispatch list for API hooks: the module structure Apache uses to
 * locate this module's configuration handlers, directive table and
 * hook registration function. */

module AP_MODULE_DECLARE_DATA wsgi_module = {
    STANDARD20_MODULE_STUFF,
    wsgi_create_dir_config,    /* create per-dir    config structures */
    wsgi_merge_dir_config,     /* merge  per-dir    config structures */
    wsgi_create_server_config, /* create per-server config structures */
    wsgi_merge_server_config,  /* merge  per-server config structures */
    wsgi_commands,             /* table of config file commands       */
    wsgi_register_hooks        /* register hooks                      */
};
+
+#endif