author     Graham Dumpleton <Graham.Dumpleton@gmail.com>  2014-01-05 21:57:37 +1100
committer  Graham Dumpleton <Graham.Dumpleton@gmail.com>  2014-01-05 21:57:37 +1100
commit     73955b8f6175f763f86ce460e4a37a0d01afa765 (patch)
tree       43df1d1d0ab5c81799a4280d66cb62908d9ac1ac
parent     e2ccb28ffa486a48bf7dce72f958e111cf0e9fd9 (diff)
download   mod_wsgi-73955b8f6175f763f86ce460e4a37a0d01afa765.tar.gz

Recover experimental changes after abandoning prior mod_wsgi 4.0 development.
-rw-r--r--  .gitignore | 20
-rw-r--r--  .hgignore | 11
-rw-r--r--  MANIFEST.in | 10
-rw-r--r--  Makefile.in (renamed from posix-ap2X.mk.in) | 30
-rw-r--r--  README | 66
-rwxr-xr-x  configure | 3304
-rw-r--r--  configure.ac | 37
-rw-r--r--  posix-ap1X.mk.in | 46
-rw-r--r--  setup.py | 143
-rw-r--r--  src/__init__.py | 2
-rw-r--r--  src/server/__init__.py | 926
-rw-r--r--  src/server/management/__init__.py | 0
-rw-r--r--  src/server/management/commands/__init__.py | 0
-rw-r--r--  src/server/management/commands/runapache.py | 51
-rw-r--r--  src/server/mod_wsgi.c (renamed from mod_wsgi.c) | 5644
-rw-r--r--  src/server/wsgi_apache.c | 161
-rw-r--r--  src/server/wsgi_apache.h | 136
-rw-r--r--  src/server/wsgi_buckets.c | 174
-rw-r--r--  src/server/wsgi_buckets.h | 40
-rw-r--r--  src/server/wsgi_convert.c | 161
-rw-r--r--  src/server/wsgi_convert.h | 36
-rw-r--r--  src/server/wsgi_daemon.c | 39
-rw-r--r--  src/server/wsgi_daemon.h | 181
-rw-r--r--  src/server/wsgi_interp.c | 1983
-rw-r--r--  src/server/wsgi_interp.h | 78
-rw-r--r--  src/server/wsgi_logger.c | 689
-rw-r--r--  src/server/wsgi_logger.h | 40
-rw-r--r--  src/server/wsgi_metrics.c | 94
-rw-r--r--  src/server/wsgi_metrics.h | 43
-rw-r--r--  src/server/wsgi_python.h | 113
-rw-r--r--  src/server/wsgi_restrict.c | 98
-rw-r--r--  src/server/wsgi_restrict.h | 43
-rw-r--r--  src/server/wsgi_server.c | 132
-rw-r--r--  src/server/wsgi_server.h | 125
-rw-r--r--  src/server/wsgi_stream.c | 255
-rw-r--r--  src/server/wsgi_stream.h | 41
-rw-r--r--  src/server/wsgi_validate.c | 172
-rw-r--r--  src/server/wsgi_validate.h | 36
-rw-r--r--  src/server/wsgi_version.h | 35
-rw-r--r--  tests/environ.wsgi | 44
-rw-r--r--  tests/hello.wsgi | 9
-rw-r--r--  tox.ini | 2
-rw-r--r--  win32/ap22py26.mk (renamed from win32-ap22py26.mk) | 6
-rw-r--r--  win32/ap22py27.mk (renamed from win32-ap22py27.mk) | 6
-rw-r--r--  win32/ap22py31.mk (renamed from win32-ap22py31.mk) | 6

45 files changed, 9825 insertions, 5443 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..61d9ea9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,20 @@
+Makefile
+autom4te.cache
+config.log
+config.status
+.libs
+*.la
+*.lo
+*.loT
+*.slo
+build
+dist
+apxs_config.py
+*.egg-info
+*.swp
+bin
+include
+lib
+.Python
+*.pyc
+.tox
diff --git a/.hgignore b/.hgignore
deleted file mode 100644
index 8a8fd49..0000000
--- a/.hgignore
+++ /dev/null
@@ -1,11 +0,0 @@
-syntax: glob
-
-Makefile
-Makefile.in
-autom4te.cache
-config.log
-config.status
-.libs
-*.la
-*.lo
-*.slo
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..9e5caa3
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,10 @@
+include configure.ac
+include configure
+include LICENCE
+include Makefile.in
+include README
+include src/server/*.h
+include src/server/*.c
+exclude src/server/apxs_config.py
+include tests/*
+include win32/*
diff --git a/posix-ap2X.mk.in b/Makefile.in
index 195cdea..f5268b5 100644
--- a/posix-ap2X.mk.in
+++ b/Makefile.in
@@ -1,4 +1,4 @@
-# Copyright 2007 GRAHAM DUMPLETON
+# Copyright 2007-2011 GRAHAM DUMPLETON
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,25 +23,37 @@ CFLAGS = @CFLAGS@
LDFLAGS = @LDFLAGS@
LDLIBS = @LDLIBS@
-all : mod_wsgi.la
+SRCFILES = src/server/mod_wsgi.c src/server/wsgi_*.c
-mod_wsgi.la : mod_wsgi.c
- $(APXS) -c $(CPPFLAGS) $(CFLAGS) mod_wsgi.c $(LDFLAGS) $(LDLIBS)
+all : src/server/mod_wsgi.la
+
+src/server/mod_wsgi.la : $(SRCFILES)
+ $(APXS) -c $(CPPFLAGS) $(CFLAGS) $(SRCFILES) $(LDFLAGS) $(LDLIBS)
$(DESTDIR)$(LIBEXECDIR) :
mkdir -p $@
install : all $(DESTDIR)$(LIBEXECDIR)
- $(APXS) -i -S LIBEXECDIR=$(DESTDIR)$(LIBEXECDIR) -n 'mod_wsgi' mod_wsgi.la
+ $(APXS) -i -S LIBEXECDIR=$(DESTDIR)$(LIBEXECDIR) -n 'mod_wsgi' src/server/mod_wsgi.la
clean :
- -rm -rf .libs
- -rm -f mod_wsgi.o mod_wsgi.la mod_wsgi.lo mod_wsgi.slo mod_wsgi.loT
- -rm -f config.log config.status
+ -rm -rf src/server/.libs
+ -rm -f src/server/*.o
+ -rm -f src/server/*.la
+ -rm -f src/server/*.lo
+ -rm -f src/server/*.slo
+ -rm -f src/server/*.loT
+ -rm -f config.log
+ -rm -f config.status
-rm -rf autom4te.cache
+ -rm -rf mod_wsgi.egg-info
+ -rm -rf build
+ -rm -rf dist
distclean : clean
- -rm -f Makefile Makefile.in
+ -rm -f Makefile
+ -rm -rf .Python bin lib include
+ -rm -rf .tox
realclean : distclean
-rm -f configure
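
With the Makefile rules above now compiling everything under src/server via apxs, the overall
build and install sequence is still driven by configure and make. A minimal sketch, where the
apxs and python locations shown are assumptions to be adjusted for your installation (the
--with-apxs and --with-python options come from the configure help further below):

    ./configure --with-apxs=/usr/sbin/apxs --with-python=/usr/bin/python
    make                   # runs 'apxs -c' over src/server/mod_wsgi.c and src/server/wsgi_*.c
    make install           # as root if needed; runs 'apxs -i' to copy mod_wsgi.so into LIBEXECDIR
    make clean             # removes the objects and .libs directory under src/server
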
diff --git a/README b/README
index e21016c..458fc4f 100644
--- a/README
+++ b/README
@@ -2,7 +2,7 @@
Welcome to MOD_WSGI
===================
-Copyright 2007-2010 GRAHAM DUMPLETON
+Copyright 2007-2014 GRAHAM DUMPLETON
The mod_wsgi adapter is an Apache module that provides a WSGI compliant
interface for hosting Python based web applications within Apache. The
@@ -10,9 +10,10 @@ adapter is written completely in C code against the Apache C runtime and
for hosting WSGI applications within Apache has a lower overhead than using
existing WSGI adapters for mod_python or CGI.
-The package can be compiled for and used with either Apache 1.3, 2.0 or 2.2.
-On UNIX systems, either the single threaded 'prefork' or multithreaded
-'worker' Apache MPMs can be used.
+The package can be compiled for and used with either Apache 2.0 or 2.2.
+Apache 1.3 is no longer supported with more recent versions of mod_wsgi. If
+you are still using Apache 1.3 you will need to use a version of mod_wsgi
+from prior to mod_wsgi 4.0.
Note that 'daemon' mode of mod_wsgi is only available if Apache 2.0 or 2.2
is being used on a UNIX platform and where the Apache run time library
@@ -25,8 +26,7 @@ Apache processes, Python should also have been compiled with shared library
support enabled. A number of Python binary packages for Linux systems are
not compiled with shared library support enabled. You should therefore
consider recompiling Python from source code with shared library support
-enabled. If a shared library is not used, you will have problems trying
-to use mod_wsgi on a server where mod_python is also being loaded.
+enabled.
If using a Python binary package for a Linux system, also ensure that the
you have the corresponding 'dev' package installed for the Python package
@@ -151,10 +151,10 @@ specific versions of "apxs2" is not used, compilation will fail due to not
being able to find the "mpm.h" header file.
Note that any of the major Apache versions should be able to be used, ie.,
-all of Apache 1.3, 2.0 and 2.2 should be compatible with this package. You
-will however need to compile the package separately against each version
-and use the resultant Apache module only with the version it was compiled
-for. Which ever version of Apache is used however, it must support dynamic
+all of Apache 2.0 and 2.2 should be compatible with this package. You will
+however need to compile the package separately against each version and use
+the resultant Apache module only with the version it was compiled for.
+Which ever version of Apache is used however, it must support dynamic
loading of Apache modules.
If you have multiple versions of Python installed and you are not using
@@ -190,11 +190,9 @@ dictated by how your operating system distribution structures the
configuration files and modules for Apache, you will need to copy the file
manually into place.
-If you are using Apache 1.3 the compiled Apache module can be found in the
-same directory as this "README" file and is called "mod_wsgi.so". If you
-are using Apache 2.X the compiled Apache module can be found in the ".libs"
-subdirectory and is again called "mod_wsgi.so". The name of the file should
-be kept the same when copied into its appropriate location.
+The compiled Apache module can be found in the ".libs" subdirectory and is
+again called "mod_wsgi.so". The name of the file should be kept the same
+when copied into its appropriate location.
To cleanup after installation, run:
@@ -234,29 +232,11 @@ located, or a path expressed relative to the root of your Apache
installation. If you used "make" to install the package, see where it
copied the file to work out what to set this value to.
-With Apache 1.3, it would also be necessary to add a line of the form:
-
- AddModule mod_wsgi.c
-
-For Apache 1.3, the LoadModule and AddModule lines related to mod_wsgi
-must appear prior to those for the Apache code module called mod_alias.
-
-If you wish to use mod_python at the same time as mod_wsgi, then mod_python
-must be compiled against the same version of Python. In addition to the
-same version of Python being used, you should use a version of Python which
-has been configured and compiled so as to generate a shared library for
-Python. If you do not do this and either mod_python or mod_wsgi are
-compiled against a static library for Python, it is likely that either
-mod_python or mod_wsgi will crash Apache when used.
-
-Note that this is not the fault of either mod_python or mod_wsgi but arises
-purely because your Python installation isn't using a shared library for
-the Python library. The result of such a configuration means that there are
-actually two copies of the Python static library objects in memory at the
-same time and this can cause problems. Linux distributions where this is
-known to be a problem are any of the RedHat derived distributions. Other
-distributions such as Ubuntu do not have a problem as they use a shared
-library for the Python library.
+Note that from mod_wsgi 4.0 onwards, the ability to load mod_python into
+the same Apache is no longer supported. If this is still attempted then
+mod_wsgi will raise an error on startup causing Apache to fail to start
+up. If you still need to use mod_python at the same time, you will need
+to use a version of mod_wsgi prior to mod_wsgi 4.0.
Having adding the required directives you should perform a restart of
Apache to check everything is okay.
@@ -289,18 +269,10 @@ a script which performs additional actions.
If all is okay, you should see a line of the form:
- Apache/2.2.2 (Unix) mod_wsgi/1.0 Python/2.3.5 configured
+ Apache/2.2.2 (Unix) mod_wsgi/4.0 Python/2.6.1 configured
in the Apache error log file.
-If Apache is configured to also load mod_python, it would instead be:
-
- Apache/2.2.2 (Unix) mod_python/3.3.1 Python/2.3.5 mod_wsgi/1.0 configured
-
-That "Python" is listed before "mod_wsgi" is indicative of the fact that
-when both modules are being loaded, mod_wsgi will leave it up to mod_python
-to initialise Python.
-
Note that mod_wsgi logs various detailed information about interpreter
creation, script loading and reloading etc, but it logs with log level of
'info'. As the default for the Apache LogLevel directive is usually 'warn',
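
The startup check described in the README hunk above can be done from the shell; a rough
sketch, where the error log location is an assumption that varies between distributions:

    apachectl restart                              # or: service httpd restart
    grep 'mod_wsgi/' /var/log/httpd/error_log | tail -1
    # expect a line such as: Apache/2.2.2 (Unix) mod_wsgi/4.0 Python/2.6.1 configured
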
diff --git a/configure b/configure
index 23649ee..7f946e4 100755
--- a/configure
+++ b/configure
@@ -1,60 +1,81 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.61.
+# Generated by GNU Autoconf 2.69.
+#
+#
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+#
#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
-## --------------------- ##
-## M4sh Initialization. ##
-## --------------------- ##
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
emulate sh
NULLCMD=:
- # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
# is contrary to our usage. Disable this feature.
alias -g '${1+"$@"}'='"$@"'
setopt NO_GLOB_SUBST
else
- case `(set -o) 2>/dev/null` in
- *posix*) set -o posix ;;
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
esac
-
fi
-
-
-# PATH needs CR
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
- echo "#! /bin/sh" >conf$$.sh
- echo "exit 0" >>conf$$.sh
- chmod +x conf$$.sh
- if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
- PATH_SEPARATOR=';'
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
else
- PATH_SEPARATOR=:
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
fi
- rm -f conf$$.sh
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
fi
-# Support unset when possible.
-if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
- as_unset=unset
-else
- as_unset=false
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
fi
@@ -63,20 +84,19 @@ fi
# there to prevent editors from complaining about space-tab.
# (If _AS_PATH_WALK were called with IFS unset, it would disable word
# splitting by setting IFS to empty value.)
-as_nl='
-'
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
-case $0 in
+as_myself=
+case $0 in #((
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
- test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-done
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
IFS=$as_save_IFS
;;
@@ -87,354 +107,365 @@ if test "x$as_myself" = x; then
as_myself=$0
fi
if test ! -f "$as_myself"; then
- echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
- { (exit 1); exit 1; }
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
fi
-# Work around bugs in pre-3.0 UWIN ksh.
-for as_var in ENV MAIL MAILPATH
-do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
done
PS1='$ '
PS2='> '
PS4='+ '
# NLS nuisances.
-for as_var in \
- LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
- LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
- LC_TELEPHONE LC_TIME
-do
- if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
- eval $as_var=C; export $as_var
- else
- ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
- fi
-done
-
-# Required to use basename.
-if expr a : '\(a\)' >/dev/null 2>&1 &&
- test "X`expr 00001 : '.*\(...\)'`" = X001; then
- as_expr=expr
-else
- as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
- as_basename=basename
-else
- as_basename=false
-fi
-
-
-# Name of the executable.
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
- X"$0" : 'X\(//\)$' \| \
- X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-echo X/"$0" |
- sed '/^.*\/\([^/][^/]*\)\/*$/{
- s//\1/
- q
- }
- /^X\/\(\/\/\)$/{
- s//\1/
- q
- }
- /^X\/\(\/\).*/{
- s//\1/
- q
- }
- s/.*/./; q'`
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
# CDPATH.
-$as_unset CDPATH
-
-
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
if test "x$CONFIG_SHELL" = x; then
- if (eval ":") 2>/dev/null; then
- as_have_required=yes
+ as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
else
- as_have_required=no
+ case \`(set -o) 2>/dev/null\` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
fi
-
- if test $as_have_required = yes && (eval ":
-(as_func_return () {
- (exit \$1)
-}
-as_func_success () {
- as_func_return 0
-}
-as_func_failure () {
- as_func_return 1
-}
-as_func_ret_success () {
- return 0
-}
-as_func_ret_failure () {
- return 1
-}
+"
+ as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
exitcode=0
-if as_func_success; then
- :
-else
- exitcode=1
- echo as_func_success failed.
-fi
-
-if as_func_failure; then
- exitcode=1
- echo as_func_failure succeeded.
-fi
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
-if as_func_ret_success; then
- :
else
- exitcode=1
- echo as_func_ret_success failed.
+ exitcode=1; echo positional parameters were not saved.
fi
-
-if as_func_ret_failure; then
- exitcode=1
- echo as_func_ret_failure succeeded.
-fi
-
-if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
- :
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
+ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1"
+ if (eval "$as_required") 2>/dev/null; then :
+ as_have_required=yes
else
- exitcode=1
- echo positional parameters were not saved.
+ as_have_required=no
fi
+ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
-test \$exitcode = 0) || { (exit 1); exit 1; }
-
-(
- as_lineno_1=\$LINENO
- as_lineno_2=\$LINENO
- test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" &&
- test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; }
-") 2> /dev/null; then
- :
-else
- as_candidate_shells=
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
- case $as_dir in
+ as_found=:
+ case $as_dir in #(
/*)
for as_base in sh bash ksh sh5; do
- as_candidate_shells="$as_candidate_shells $as_dir/$as_base"
+ # Try only shells that exist, to save several forks.
+ as_shell=$as_dir/$as_base
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ CONFIG_SHELL=$as_shell as_have_required=yes
+ if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ break 2
+fi
+fi
done;;
esac
+ as_found=false
done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+ CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
IFS=$as_save_IFS
- for as_shell in $as_candidate_shells $SHELL; do
- # Try only shells that exist, to save several forks.
- if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
- { ("$as_shell") 2> /dev/null <<\_ASEOF
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
- emulate sh
- NULLCMD=:
- # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
- # is contrary to our usage. Disable this feature.
- alias -g '${1+"$@"}'='"$@"'
- setopt NO_GLOB_SUBST
-else
- case `(set -o) 2>/dev/null` in
- *posix*) set -o posix ;;
-esac
-
-fi
-
-
-:
-_ASEOF
-}; then
- CONFIG_SHELL=$as_shell
- as_have_required=yes
- if { "$as_shell" 2> /dev/null <<\_ASEOF
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
- emulate sh
- NULLCMD=:
- # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
- # is contrary to our usage. Disable this feature.
- alias -g '${1+"$@"}'='"$@"'
- setopt NO_GLOB_SUBST
-else
- case `(set -o) 2>/dev/null` in
- *posix*) set -o posix ;;
+ if test "x$CONFIG_SHELL" != x; then :
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
esac
-
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
fi
-
-:
-(as_func_return () {
- (exit $1)
-}
-as_func_success () {
- as_func_return 0
-}
-as_func_failure () {
- as_func_return 1
-}
-as_func_ret_success () {
- return 0
-}
-as_func_ret_failure () {
- return 1
-}
-
-exitcode=0
-if as_func_success; then
- :
-else
- exitcode=1
- echo as_func_success failed.
-fi
-
-if as_func_failure; then
- exitcode=1
- echo as_func_failure succeeded.
-fi
-
-if as_func_ret_success; then
- :
-else
- exitcode=1
- echo as_func_ret_success failed.
-fi
-
-if as_func_ret_failure; then
- exitcode=1
- echo as_func_ret_failure succeeded.
-fi
-
-if ( set x; as_func_ret_success y && test x = "$1" ); then
- :
-else
- exitcode=1
- echo positional parameters were not saved.
+ if test x$as_have_required = xno; then :
+ $as_echo "$0: This script requires a shell more modern than all"
+ $as_echo "$0: the shells that I found on your system."
+ if test x${ZSH_VERSION+set} = xset ; then
+ $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+ $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+ else
+ $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
+ fi
+ exit 1
fi
-
-test $exitcode = 0) || { (exit 1); exit 1; }
-
-(
- as_lineno_1=$LINENO
- as_lineno_2=$LINENO
- test "x$as_lineno_1" != "x$as_lineno_2" &&
- test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; }
-
-_ASEOF
-}; then
- break
fi
-
fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
- done
-
- if test "x$CONFIG_SHELL" != x; then
- for as_var in BASH_ENV ENV
- do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
- done
- export CONFIG_SHELL
- exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
-fi
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
- if test $as_have_required = no; then
- echo This script requires a shell more modern than all the
- echo shells that I found on your system. Please install a
- echo modern shell, or manually run the script under such a
- echo shell if you do have one.
- { (exit 1); exit 1; }
-fi
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
-fi
-fi
+} # as_fn_mkdir_p
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
-(eval "as_func_return () {
- (exit \$1)
-}
-as_func_success () {
- as_func_return 0
-}
-as_func_failure () {
- as_func_return 1
-}
-as_func_ret_success () {
- return 0
-}
-as_func_ret_failure () {
- return 1
-}
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
-exitcode=0
-if as_func_success; then
- :
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
else
- exitcode=1
- echo as_func_success failed.
-fi
-
-if as_func_failure; then
- exitcode=1
- echo as_func_failure succeeded.
+ as_expr=false
fi
-if as_func_ret_success; then
- :
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
else
- exitcode=1
- echo as_func_ret_success failed.
-fi
-
-if as_func_ret_failure; then
- exitcode=1
- echo as_func_ret_failure succeeded.
+ as_basename=false
fi
-if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
- :
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
else
- exitcode=1
- echo positional parameters were not saved.
+ as_dirname=false
fi
-test \$exitcode = 0") || {
- echo No shell found that supports shell functions.
- echo Please tell autoconf@gnu.org about your system,
- echo including any error possibly output before this
- echo message
-}
-
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
- as_lineno_1=$LINENO
- as_lineno_2=$LINENO
- test "x$as_lineno_1" != "x$as_lineno_2" &&
- test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
- # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
- # uniformly replaced by the line number. The first 'sed' inserts a
- # line-number line after each line using $LINENO; the second 'sed'
- # does the real work. The second script uses 'N' to pair each
- # line-number line with the line containing $LINENO, and appends
- # trailing '-' during substitution so that $LINENO is not a special
- # case at line end.
- # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
- # scripts with optimization help from Paolo Bonzini. Blame Lee
- # E. McMahon (1931-1989) for sed's syntax. :-)
+ as_lineno_1=$LINENO as_lineno_1a=$LINENO
+ as_lineno_2=$LINENO as_lineno_2a=$LINENO
+ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+ # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-)
sed -n '
p
/[$]LINENO/=
@@ -451,9 +482,12 @@ test \$exitcode = 0") || {
s/-\n.*//
' >$as_me.lineno &&
chmod +x "$as_me.lineno" ||
- { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
- { (exit 1); exit 1; }; }
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
# Don't try to exec as it changes $[0], causing all sort of problems
# (the dirname of $[0] is not the place where we might find the
# original and so on. Autoconf is especially sensitive to this).
@@ -462,84 +496,55 @@ test \$exitcode = 0") || {
exit
}
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
- as_dirname=dirname
-else
- as_dirname=false
-fi
-
ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in
+case `echo -n x` in #(((((
-n*)
- case `echo 'x\c'` in
+ case `echo 'xy\c'` in
*c*) ECHO_T=' ';; # ECHO_T is single tab character.
- *) ECHO_C='\c';;
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
esac;;
*)
ECHO_N='-n';;
esac
-if expr a : '\(a\)' >/dev/null 2>&1 &&
- test "X`expr 00001 : '.*\(...\)'`" = X001; then
- as_expr=expr
-else
- as_expr=false
-fi
-
rm -f conf$$ conf$$.exe conf$$.file
if test -d conf$$.dir; then
rm -f conf$$.dir/conf$$.file
else
rm -f conf$$.dir
- mkdir conf$$.dir
-fi
-echo >conf$$.file
-if ln -s conf$$.file conf$$ 2>/dev/null; then
- as_ln_s='ln -s'
- # ... but there are two gotchas:
- # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
- # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
- ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
-elif ln conf$$.file conf$$ 2>/dev/null; then
- as_ln_s=ln
-else
- as_ln_s='cp -p'
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
if mkdir -p . 2>/dev/null; then
- as_mkdir_p=:
+ as_mkdir_p='mkdir -p "$as_dir"'
else
test -d ./-p && rmdir ./-p
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -548,11 +553,11 @@ as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-exec 7<&0 </dev/null 6>&1
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
# Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
# so uname gets run too.
ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
@@ -567,7 +572,6 @@ cross_compiling=no
subdirs=
MFLAGS=
MAKEFLAGS=
-SHELL=${CONFIG_SHELL-/bin/sh}
# Identity of this package.
PACKAGE_NAME=
@@ -575,63 +579,83 @@ PACKAGE_TARNAME=
PACKAGE_VERSION=
PACKAGE_STRING=
PACKAGE_BUGREPORT=
+PACKAGE_URL=
-ac_unique_file="mod_wsgi.c"
-ac_subst_vars='SHELL
-PATH_SEPARATOR
-PACKAGE_NAME
-PACKAGE_TARNAME
-PACKAGE_VERSION
-PACKAGE_STRING
-PACKAGE_BUGREPORT
-exec_prefix
-prefix
-program_transform_name
-bindir
-sbindir
-libexecdir
-datarootdir
-datadir
-sysconfdir
-sharedstatedir
-localstatedir
-includedir
-oldincludedir
-docdir
-infodir
-htmldir
-dvidir
-pdfdir
-psdir
-libdir
-localedir
-mandir
-DEFS
-ECHO_C
-ECHO_N
-ECHO_T
-LIBS
-build_alias
-host_alias
-target_alias
-APXS
+ac_unique_file="src/server/mod_wsgi.c"
+ac_subst_vars='LTLIBOBJS
+LIBOBJS
+LIBEXECDIR
+LDLIBS
PYTHON
+OBJEXT
+EXEEXT
+ac_ct_CC
CPPFLAGS
-CFLAGS
LDFLAGS
-LDLIBS
-LIBEXECDIR
-LIBOBJS
-LTLIBOBJS'
+CFLAGS
+CC
+APXS
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_framework
+enable_embedded
+with_apxs
+with_python
+'
ac_precious_vars='build_alias
host_alias
-target_alias'
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS'
# Initialize some variables set by options.
ac_init_help=
ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
# The variables have the same names as the options, with
# dashes changed to underlines.
cache_file=/dev/null
@@ -687,8 +711,9 @@ do
fi
case $ac_option in
- *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
- *) ac_optarg=yes ;;
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
esac
# Accept the important Cygnus configure options, so we can diagnose typos.
@@ -730,13 +755,20 @@ do
datarootdir=$ac_optarg ;;
-disable-* | --disable-*)
- ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
# Reject names that are not valid shell variable names.
- expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null &&
- { echo "$as_me: error: invalid feature name: $ac_feature" >&2
- { (exit 1); exit 1; }; }
- ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'`
- eval enable_$ac_feature=no ;;
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
-docdir | --docdir | --docdi | --doc | --do)
ac_prev=docdir ;;
@@ -749,13 +781,20 @@ do
dvidir=$ac_optarg ;;
-enable-* | --enable-*)
- ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
# Reject names that are not valid shell variable names.
- expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null &&
- { echo "$as_me: error: invalid feature name: $ac_feature" >&2
- { (exit 1); exit 1; }; }
- ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'`
- eval enable_$ac_feature=\$ac_optarg ;;
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
-exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
| --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
@@ -946,22 +985,36 @@ do
ac_init_version=: ;;
-with-* | --with-*)
- ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
# Reject names that are not valid shell variable names.
- expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null &&
- { echo "$as_me: error: invalid package name: $ac_package" >&2
- { (exit 1); exit 1; }; }
- ac_package=`echo $ac_package | sed 's/[-.]/_/g'`
- eval with_$ac_package=\$ac_optarg ;;
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
-without-* | --without-*)
- ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
# Reject names that are not valid shell variable names.
- expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null &&
- { echo "$as_me: error: invalid package name: $ac_package" >&2
- { (exit 1); exit 1; }; }
- ac_package=`echo $ac_package | sed 's/[-.]/_/g'`
- eval with_$ac_package=no ;;
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
--x)
# Obsolete; use --with-x.
@@ -981,26 +1034,26 @@ do
| --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
x_libraries=$ac_optarg ;;
- -*) { echo "$as_me: error: unrecognized option: $ac_option
-Try \`$0 --help' for more information." >&2
- { (exit 1); exit 1; }; }
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
;;
*=*)
ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
# Reject names that are not valid shell variable names.
- expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
- { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
- { (exit 1); exit 1; }; }
+ case $ac_envvar in #(
+ '' | [0-9]* | *[!_$as_cr_alnum]* )
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+ esac
eval $ac_envvar=\$ac_optarg
export $ac_envvar ;;
*)
# FIXME: should be removed in autoconf 3.0.
- echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
- echo "$as_me: WARNING: invalid host type: $ac_option" >&2
- : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
;;
esac
@@ -1008,23 +1061,36 @@ done
if test -n "$ac_prev"; then
ac_option=--`echo $ac_prev | sed 's/_/-/g'`
- { echo "$as_me: error: missing argument to $ac_option" >&2
- { (exit 1); exit 1; }; }
+ as_fn_error $? "missing argument to $ac_option"
fi
-# Be sure to have absolute directory names.
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir
do
eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
case $ac_val in
[\\/$]* | ?:[\\/]* ) continue;;
NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
esac
- { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
- { (exit 1); exit 1; }; }
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
done
# There might be people who depend on the old broken behavior: `$host'
@@ -1038,8 +1104,6 @@ target=$target_alias
if test "x$host_alias" != x; then
if test "x$build_alias" = x; then
cross_compiling=maybe
- echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
- If a cross compiler is detected then cross compile mode will be used." >&2
elif test "x$build_alias" != "x$host_alias"; then
cross_compiling=yes
fi
@@ -1054,23 +1118,21 @@ test "$silent" = yes && exec 6>/dev/null
ac_pwd=`pwd` && test -n "$ac_pwd" &&
ac_ls_di=`ls -di .` &&
ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
- { echo "$as_me: error: Working directory cannot be determined" >&2
- { (exit 1); exit 1; }; }
+ as_fn_error $? "working directory cannot be determined"
test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
- { echo "$as_me: error: pwd does not report name of working directory" >&2
- { (exit 1); exit 1; }; }
+ as_fn_error $? "pwd does not report name of working directory"
# Find the source files, if location was not specified.
if test -z "$srcdir"; then
ac_srcdir_defaulted=yes
# Try the directory containing this script, then the parent directory.
- ac_confdir=`$as_dirname -- "$0" ||
-$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
- X"$0" : 'X\(//\)[^/]' \| \
- X"$0" : 'X\(//\)$' \| \
- X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-echo X"$0" |
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
s//\1/
q
@@ -1097,13 +1159,11 @@ else
fi
if test ! -r "$srcdir/$ac_unique_file"; then
test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
- { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
- { (exit 1); exit 1; }; }
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
fi
ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
ac_abs_confdir=`(
- cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2
- { (exit 1); exit 1; }; }
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
pwd)`
# When building in place, set srcdir=.
if test "$ac_abs_confdir" = "$ac_pwd"; then
@@ -1143,7 +1203,7 @@ Configuration:
--help=short display options specific to this package
--help=recursive display the short help of all the included packages
-V, --version display version information and exit
- -q, --quiet, --silent do not print \`checking...' messages
+ -q, --quiet, --silent do not print \`checking ...' messages
--cache-file=FILE cache test results in FILE [disabled]
-C, --config-cache alias for \`--cache-file=config.cache'
-n, --no-create do not create output files
@@ -1151,9 +1211,9 @@ Configuration:
Installation directories:
--prefix=PREFIX install architecture-independent files in PREFIX
- [$ac_default_prefix]
+ [$ac_default_prefix]
--exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
- [PREFIX]
+ [PREFIX]
By default, \`make install' will install all the files in
\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
@@ -1163,25 +1223,25 @@ for instance \`--prefix=\$HOME'.
For better control, use the options below.
Fine tuning of the installation directories:
- --bindir=DIR user executables [EPREFIX/bin]
- --sbindir=DIR system admin executables [EPREFIX/sbin]
- --libexecdir=DIR program executables [EPREFIX/libexec]
- --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
- --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
- --localstatedir=DIR modifiable single-machine data [PREFIX/var]
- --libdir=DIR object code libraries [EPREFIX/lib]
- --includedir=DIR C header files [PREFIX/include]
- --oldincludedir=DIR C header files for non-gcc [/usr/include]
- --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
- --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
- --infodir=DIR info documentation [DATAROOTDIR/info]
- --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
- --mandir=DIR man documentation [DATAROOTDIR/man]
- --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE]
- --htmldir=DIR html documentation [DOCDIR]
- --dvidir=DIR dvi documentation [DOCDIR]
- --pdfdir=DIR pdf documentation [DOCDIR]
- --psdir=DIR ps documentation [DOCDIR]
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
_ACEOF
cat <<\_ACEOF
@@ -1193,6 +1253,7 @@ if test -n "$ac_init_help"; then
cat <<\_ACEOF
Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
--disable-framework disable mod_wsgi framework link
@@ -1201,9 +1262,22 @@ Optional Features:
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
--without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
- --with-apxs=NAME name of the apxs executable [apxs]
- --with-python=NAME name of the python executable [python]
-
+ --with-apxs=NAME name of the apxs executable [[apxs]]
+ --with-python=NAME name of the python executable [[python]]
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to the package provider.
_ACEOF
ac_status=$?
fi
@@ -1211,15 +1285,17 @@ fi
if test "$ac_init_help" = "recursive"; then
# If there are subdirs, report their specific --help.
for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
- test -d "$ac_dir" || continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
ac_builddir=.
case "$ac_dir" in
.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
*)
- ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
# A ".." for each directory in $ac_dir_suffix.
- ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
case $ac_top_builddir_sub in
"") ac_top_builddir_sub=. ac_top_build_prefix= ;;
*) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
@@ -1255,7 +1331,7 @@ ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
echo &&
$SHELL "$ac_srcdir/configure" --help=recursive
else
- echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
fi || ac_status=$?
cd "$ac_pwd" || { ac_status=$?; break; }
done
@@ -1265,21 +1341,175 @@ test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
configure
-generated by GNU Autoconf 2.61
+generated by GNU Autoconf 2.69
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF
exit
fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ test -x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $2 (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by $as_me, which was
-generated by GNU Autoconf 2.61. Invocation command line was
+generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -1315,8 +1545,8 @@ for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
- echo "PATH: $as_dir"
-done
+ $as_echo "PATH: $as_dir"
+ done
IFS=$as_save_IFS
} >&5
@@ -1350,12 +1580,12 @@ do
| -silent | --silent | --silen | --sile | --sil)
continue ;;
*\'*)
- ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
esac
case $ac_pass in
- 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
2)
- ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ as_fn_append ac_configure_args1 " '$ac_arg'"
if test $ac_must_keep_next = true; then
ac_must_keep_next=false # Got value, back to normal.
else
@@ -1371,13 +1601,13 @@ do
-* ) ac_must_keep_next=true ;;
esac
fi
- ac_configure_args="$ac_configure_args '$ac_arg'"
+ as_fn_append ac_configure_args " '$ac_arg'"
;;
esac
done
done
-$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
-$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
# When interrupted or exit'd, cleanup temporary files, and complete
# config.log. We remove comments because anyway the quotes in there
@@ -1389,11 +1619,9 @@ trap 'exit_status=$?
{
echo
- cat <<\_ASBOX
-## ---------------- ##
+ $as_echo "## ---------------- ##
## Cache variables. ##
-## ---------------- ##
-_ASBOX
+## ---------------- ##"
echo
# The following way of writing the cache mishandles newlines in values,
(
@@ -1402,12 +1630,13 @@ _ASBOX
case $ac_val in #(
*${as_nl}*)
case $ac_var in #(
- *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
-echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
esac
case $ac_var in #(
_ | IFS | as_nl) ;; #(
- *) $as_unset $ac_var ;;
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
esac ;;
esac
done
@@ -1426,128 +1655,136 @@ echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
)
echo
- cat <<\_ASBOX
-## ----------------- ##
+ $as_echo "## ----------------- ##
## Output variables. ##
-## ----------------- ##
-_ASBOX
+## ----------------- ##"
echo
for ac_var in $ac_subst_vars
do
eval ac_val=\$$ac_var
case $ac_val in
- *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
esac
- echo "$ac_var='\''$ac_val'\''"
+ $as_echo "$ac_var='\''$ac_val'\''"
done | sort
echo
if test -n "$ac_subst_files"; then
- cat <<\_ASBOX
-## ------------------- ##
+ $as_echo "## ------------------- ##
## File substitutions. ##
-## ------------------- ##
-_ASBOX
+## ------------------- ##"
echo
for ac_var in $ac_subst_files
do
eval ac_val=\$$ac_var
case $ac_val in
- *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
esac
- echo "$ac_var='\''$ac_val'\''"
+ $as_echo "$ac_var='\''$ac_val'\''"
done | sort
echo
fi
if test -s confdefs.h; then
- cat <<\_ASBOX
-## ----------- ##
+ $as_echo "## ----------- ##
## confdefs.h. ##
-## ----------- ##
-_ASBOX
+## ----------- ##"
echo
cat confdefs.h
echo
fi
test "$ac_signal" != 0 &&
- echo "$as_me: caught signal $ac_signal"
- echo "$as_me: exit $exit_status"
+ $as_echo "$as_me: caught signal $ac_signal"
+ $as_echo "$as_me: exit $exit_status"
} >&5
rm -f core *.core core.conftest.* &&
rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
exit $exit_status
' 0
for ac_signal in 1 2 13 15; do
- trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+ trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
done
ac_signal=0
# confdefs.h avoids OS command line length limits that DEFS can exceed.
rm -f -r conftest* confdefs.h
+$as_echo "/* confdefs.h */" > confdefs.h
+
# Predefined preprocessor variables.
cat >>confdefs.h <<_ACEOF
#define PACKAGE_NAME "$PACKAGE_NAME"
_ACEOF
-
cat >>confdefs.h <<_ACEOF
#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
_ACEOF
-
cat >>confdefs.h <<_ACEOF
#define PACKAGE_VERSION "$PACKAGE_VERSION"
_ACEOF
-
cat >>confdefs.h <<_ACEOF
#define PACKAGE_STRING "$PACKAGE_STRING"
_ACEOF
-
cat >>confdefs.h <<_ACEOF
#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
_ACEOF
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
# Let the site file select an alternate cache file if it wants to.
-# Prefer explicitly selected file to automatically selected ones.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
if test -n "$CONFIG_SITE"; then
- set x "$CONFIG_SITE"
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
elif test "x$prefix" != xNONE; then
- set x "$prefix/share/config.site" "$prefix/etc/config.site"
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
else
- set x "$ac_default_prefix/share/config.site" \
- "$ac_default_prefix/etc/config.site"
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
fi
-shift
-for ac_site_file
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
do
- if test -r "$ac_site_file"; then
- { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
-echo "$as_me: loading site script $ac_site_file" >&6;}
+ test "x$ac_site_file" = xNONE && continue
+ if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
sed 's/^/| /' "$ac_site_file" >&5
- . "$ac_site_file"
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
fi
done
if test -r "$cache_file"; then
- # Some versions of bash will fail to source /dev/null (special
- # files actually), so we avoid doing that.
- if test -f "$cache_file"; then
- { echo "$as_me:$LINENO: loading cache $cache_file" >&5
-echo "$as_me: loading cache $cache_file" >&6;}
+ # Some versions of bash will fail to source /dev/null (special files
+ # actually), so we avoid doing that. DJGPP emulates it as a regular file.
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
case $cache_file in
[\\/]* | ?:[\\/]* ) . "$cache_file";;
*) . "./$cache_file";;
esac
fi
else
- { echo "$as_me:$LINENO: creating cache $cache_file" >&5
-echo "$as_me: creating cache $cache_file" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
>$cache_file
fi
@@ -1561,60 +1798,56 @@ for ac_var in $ac_precious_vars; do
eval ac_new_val=\$ac_env_${ac_var}_value
case $ac_old_set,$ac_new_set in
set,)
- { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
-echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
ac_cache_corrupted=: ;;
,set)
- { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
-echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
ac_cache_corrupted=: ;;
,);;
*)
if test "x$ac_old_val" != "x$ac_new_val"; then
- { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
-echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
- { echo "$as_me:$LINENO: former value: $ac_old_val" >&5
-echo "$as_me: former value: $ac_old_val" >&2;}
- { echo "$as_me:$LINENO: current value: $ac_new_val" >&5
-echo "$as_me: current value: $ac_new_val" >&2;}
- ac_cache_corrupted=:
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5
+$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
fi;;
esac
# Pass precious variables to config.status.
if test "$ac_new_set" = set; then
case $ac_new_val in
- *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
*) ac_arg=$ac_var=$ac_new_val ;;
esac
case " $ac_configure_args " in
*" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
- *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ *) as_fn_append ac_configure_args " '$ac_arg'" ;;
esac
fi
done
if $ac_cache_corrupted; then
- { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
-echo "$as_me: error: changes in the environment can compromise the build" >&2;}
- { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
-echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
- { (exit 1); exit 1; }; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
fi
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
@@ -1625,7 +1858,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
# Check whether --enable-framework was given.
-if test "${enable_framework+set}" = set; then
+if test "${enable_framework+set}" = set; then :
enableval=$enable_framework; ENABLE_FRAMEWORK=$enableval
else
ENABLE_FRAMEWORK=yes
@@ -1633,7 +1866,7 @@ fi
# Check whether --enable-embedded was given.
-if test "${enable_embedded+set}" = set; then
+if test "${enable_embedded+set}" = set; then :
enableval=$enable_embedded; ENABLE_EMBEDDED=$enableval
else
ENABLE_EMBEDDED=yes
@@ -1642,7 +1875,7 @@ fi
# Check whether --with-apxs was given.
-if test "${with_apxs+set}" = set; then
+if test "${with_apxs+set}" = set; then :
withval=$with_apxs; APXS="$with_apxs"
fi
@@ -1652,10 +1885,10 @@ if test -z "${APXS}"; then
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_path_APXS+set}" = set; then
- echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_APXS+:} false; then :
+ $as_echo_n "(cached) " >&6
else
case $APXS in
[\\/]* | ?:[\\/]*)
@@ -1668,14 +1901,14 @@ for as_dir in $as_dummy
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_APXS="$as_dir/$ac_word$ac_exec_ext"
- echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
-done
+ done
IFS=$as_save_IFS
;;
@@ -1683,11 +1916,11 @@ esac
fi
APXS=$ac_cv_path_APXS
if test -n "$APXS"; then
- { echo "$as_me:$LINENO: result: $APXS" >&5
-echo "${ECHO_T}$APXS" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $APXS" >&5
+$as_echo "$APXS" >&6; }
else
- { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
fi
@@ -1699,8 +1932,810 @@ fi
-{ echo "$as_me:$LINENO: checking Apache version" >&5
-echo $ECHO_N "checking Apache version... $ECHO_C" >&6; }
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers, and finding out an intuition
+# of exeext.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+if test -z "$ac_file"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+ { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if { ac_try='./conftest$ac_cv_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+else
+ CFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+for ac_func in prctl
+do :
+ ac_fn_c_check_func "$LINENO" "prctl" "ac_cv_func_prctl"
+if test "x$ac_cv_func_prctl" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_PRCTL 1
+_ACEOF
+
+fi
+done
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking Apache version" >&5
+$as_echo_n "checking Apache version... " >&6; }
HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`"
HTTPD_INCLUDEDIR="`${APXS} -q INCLUDEDIR`"
if test -x ${HTTPD}; then
@@ -1721,12 +2756,12 @@ else
fi
fi
fi
-{ echo "$as_me:$LINENO: result: $HTTPD_VERSION" >&5
-echo "${ECHO_T}$HTTPD_VERSION" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $HTTPD_VERSION" >&5
+$as_echo "$HTTPD_VERSION" >&6; }
# Check whether --with-python was given.
-if test "${with_python+set}" = set; then
+if test "${with_python+set}" = set; then :
withval=$with_python; PYTHON="$with_python"
fi
@@ -1736,10 +2771,10 @@ if test -z "${PYTHON}"; then
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_path_PYTHON+set}" = set; then
- echo $ECHO_N "(cached) $ECHO_C" >&6
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_PYTHON+:} false; then :
+ $as_echo_n "(cached) " >&6
else
case $PYTHON in
[\\/]* | ?:[\\/]*)
@@ -1752,14 +2787,14 @@ for as_dir in $as_dummy
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext"
- echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
-done
+ done
IFS=$as_save_IFS
;;
@@ -1767,11 +2802,11 @@ esac
fi
PYTHON=$ac_cv_path_PYTHON
if test -n "$PYTHON"; then
- { echo "$as_me:$LINENO: result: $PYTHON" >&5
-echo "${ECHO_T}$PYTHON" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5
+$as_echo "$PYTHON" >&6; }
else
- { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
fi
@@ -1785,7 +2820,15 @@ fi
PYTHON_VERSION=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
- stdout.write((sysconfig.get_config_var("VERSION")))'`
+ stdout.write(sysconfig.get_config_var("VERSION"))'`
+
+PYTHON_LDVERSION=`${PYTHON} -c 'from sys import stdout; \
+ from distutils import sysconfig; \
+ stdout.write(sysconfig.get_config_var("LDVERSION") or "")'`
+
+if test x"${PYTHON_LDVERSION}" = x""; then
+ PYTHON_LDVERSION=${PYTHON_VERSION}
+fi
CPPFLAGS1=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
@@ -1823,12 +2866,28 @@ PYTHONFRAMEWORK=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORK"))'`
+if test "${PYTHON_LDVERSION}" != "${PYTHON_VERSION}"; then
+ PYTHONCFGDIR="${PYTHONCFGDIR}-${PYTHON_LDVERSION}"
+fi
+
if test "${PYTHONFRAMEWORKDIR}" = "no-framework" -o \
"${ENABLE_FRAMEWORK}" != "yes"; then
LDFLAGS1="-L${PYTHONLIBDIR}"
LDFLAGS2="-L${PYTHONCFGDIR}"
- LDLIBS1="-lpython${PYTHON_VERSION}"
+ LDLIBS1="-lpython${PYTHON_LDVERSION}"
+
+ # MacOS X seems to be broken and doesn't use ABIFLAGS suffix
+ # so add a check to try and work out what we need to do.
+
+ if test -f "${PYTHONLIBDIR}/libpython${PYTHON_VERSION}.a"; then
+ LDLIBS1="-lpython${PYTHON_VERSION}"
+ fi
+
+ if test -f "${PYTHONCFGDIR}/libpython${PYTHON_VERSION}.a"; then
+ LDLIBS1="-lpython${PYTHON_VERSION}"
+ fi
+
LDLIBS2=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
stdout.write(sysconfig.get_config_var("LIBS"))'`
@@ -1879,9 +2938,6 @@ LIBEXECDIR="`${APXS} -q LIBEXECDIR`"
HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'`
-rm -f Makefile.in
-ln -s posix-ap${HTTPD_MAJOR_VERSION}X.mk.in Makefile.in
-
ac_config_files="$ac_config_files Makefile"
cat >confcache <<\_ACEOF
@@ -1911,12 +2967,13 @@ _ACEOF
case $ac_val in #(
*${as_nl}*)
case $ac_var in #(
- *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
-echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
esac
case $ac_var in #(
_ | IFS | as_nl) ;; #(
- *) $as_unset $ac_var ;;
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
esac ;;
esac
done
@@ -1924,8 +2981,8 @@ echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
(set) 2>&1 |
case $as_nl`(ac_space=' '; set) 2>&1` in #(
*${as_nl}ac_space=\ *)
- # `set' does not quote correctly, so add quotes (double-quote
- # substitution turns \\\\ into \\, and sed turns \\ into \).
+ # `set' does not quote correctly, so add quotes: double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \.
sed -n \
"s/'/'\\\\''/g;
s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
@@ -1947,13 +3004,24 @@ echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
:end' >>confcache
if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
if test -w "$cache_file"; then
- test "x$cache_file" != "x/dev/null" &&
- { echo "$as_me:$LINENO: updating cache $cache_file" >&5
-echo "$as_me: updating cache $cache_file" >&6;}
- cat confcache >$cache_file
+ if test "x$cache_file" != "x/dev/null"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
else
- { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
-echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
fi
fi
rm -f confcache
@@ -1970,6 +3038,12 @@ test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
# take arguments), then branch to the quote section. Otherwise,
# look for a macro that doesn't take arguments.
ac_script='
+:mline
+/\\$/{
+ N
+ s,\\\n,,
+ b mline
+}
t clear
:clear
s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g
@@ -1996,14 +3070,15 @@ DEFS=`sed -n "$ac_script" confdefs.h`
ac_libobjs=
ac_ltlibobjs=
+U=
for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
# 1. Remove the extension, and $U if already installed.
ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
- ac_i=`echo "$ac_i" | sed "$ac_script"`
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
# 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
# will be set to the directory where LIBOBJS objects are built.
- ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext"
- ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo'
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
done
LIBOBJS=$ac_libobjs
@@ -2011,12 +3086,14 @@ LTLIBOBJS=$ac_ltlibobjs
-: ${CONFIG_STATUS=./config.status}
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
ac_clean_files_save=$ac_clean_files
ac_clean_files="$ac_clean_files $CONFIG_STATUS"
-{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
-echo "$as_me: creating $CONFIG_STATUS" >&6;}
-cat >$CONFIG_STATUS <<_ACEOF
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
#! $SHELL
# Generated by $as_me.
# Run this file to recreate the current configuration.
@@ -2026,59 +3103,79 @@ cat >$CONFIG_STATUS <<_ACEOF
debug=false
ac_cs_recheck=false
ac_cs_silent=false
-SHELL=\${CONFIG_SHELL-$SHELL}
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
-## --------------------- ##
-## M4sh Initialization. ##
-## --------------------- ##
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
emulate sh
NULLCMD=:
- # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
# is contrary to our usage. Disable this feature.
alias -g '${1+"$@"}'='"$@"'
setopt NO_GLOB_SUBST
else
- case `(set -o) 2>/dev/null` in
- *posix*) set -o posix ;;
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
esac
-
fi
-
-
-# PATH needs CR
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
- echo "#! /bin/sh" >conf$$.sh
- echo "exit 0" >>conf$$.sh
- chmod +x conf$$.sh
- if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
- PATH_SEPARATOR=';'
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
else
- PATH_SEPARATOR=:
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
fi
- rm -f conf$$.sh
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
fi
-# Support unset when possible.
-if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
- as_unset=unset
-else
- as_unset=false
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
fi
@@ -2087,20 +3184,19 @@ fi
# there to prevent editors from complaining about space-tab.
# (If _AS_PATH_WALK were called with IFS unset, it would disable word
# splitting by setting IFS to empty value.)
-as_nl='
-'
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
-case $0 in
+as_myself=
+case $0 in #((
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
- test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-done
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
IFS=$as_save_IFS
;;
@@ -2111,32 +3207,111 @@ if test "x$as_myself" = x; then
as_myself=$0
fi
if test ! -f "$as_myself"; then
- echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
- { (exit 1); exit 1; }
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
fi
-# Work around bugs in pre-3.0 UWIN ksh.
-for as_var in ENV MAIL MAILPATH
-do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
done
PS1='$ '
PS2='> '
PS4='+ '
# NLS nuisances.
-for as_var in \
- LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
- LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
- LC_TELEPHONE LC_TIME
-do
- if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
- eval $as_var=C; export $as_var
- else
- ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
fi
-done
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
-# Required to use basename.
if expr a : '\(a\)' >/dev/null 2>&1 &&
test "X`expr 00001 : '.*\(...\)'`" = X001; then
as_expr=expr
@@ -2150,13 +3325,17 @@ else
as_basename=false
fi
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
-# Name of the executable.
as_me=`$as_basename -- "$0" ||
$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
X"$0" : 'X\(//\)$' \| \
X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-echo X/"$0" |
+$as_echo X/"$0" |
sed '/^.*\/\([^/][^/]*\)\/*$/{
s//\1/
q
@@ -2171,131 +3350,118 @@ echo X/"$0" |
}
s/.*/./; q'`
-# CDPATH.
-$as_unset CDPATH
-
-
-
- as_lineno_1=$LINENO
- as_lineno_2=$LINENO
- test "x$as_lineno_1" != "x$as_lineno_2" &&
- test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
-
- # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
- # uniformly replaced by the line number. The first 'sed' inserts a
- # line-number line after each line using $LINENO; the second 'sed'
- # does the real work. The second script uses 'N' to pair each
- # line-number line with the line containing $LINENO, and appends
- # trailing '-' during substitution so that $LINENO is not a special
- # case at line end.
- # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
- # scripts with optimization help from Paolo Bonzini. Blame Lee
- # E. McMahon (1931-1989) for sed's syntax. :-)
- sed -n '
- p
- /[$]LINENO/=
- ' <$as_myself |
- sed '
- s/[$]LINENO.*/&-/
- t lineno
- b
- :lineno
- N
- :loop
- s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
- t loop
- s/-\n.*//
- ' >$as_me.lineno &&
- chmod +x "$as_me.lineno" ||
- { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
- { (exit 1); exit 1; }; }
-
- # Don't try to exec as it changes $[0], causing all sort of problems
- # (the dirname of $[0] is not the place where we might find the
- # original and so on. Autoconf is especially sensitive to this).
- . "./$as_me.lineno"
- # Exit status is that of the last command.
- exit
-}
-
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
- as_dirname=dirname
-else
- as_dirname=false
-fi
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in
+case `echo -n x` in #(((((
-n*)
- case `echo 'x\c'` in
+ case `echo 'xy\c'` in
*c*) ECHO_T=' ';; # ECHO_T is single tab character.
- *) ECHO_C='\c';;
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
esac;;
*)
ECHO_N='-n';;
esac
-if expr a : '\(a\)' >/dev/null 2>&1 &&
- test "X`expr 00001 : '.*\(...\)'`" = X001; then
- as_expr=expr
-else
- as_expr=false
-fi
-
rm -f conf$$ conf$$.exe conf$$.file
if test -d conf$$.dir; then
rm -f conf$$.dir/conf$$.file
else
rm -f conf$$.dir
- mkdir conf$$.dir
-fi
-echo >conf$$.file
-if ln -s conf$$.file conf$$ 2>/dev/null; then
- as_ln_s='ln -s'
- # ... but there are two gotchas:
- # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
- # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
- ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
-elif ln conf$$.file conf$$ 2>/dev/null; then
- as_ln_s=ln
-else
- as_ln_s='cp -p'
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
if mkdir -p . 2>/dev/null; then
- as_mkdir_p=:
+ as_mkdir_p='mkdir -p "$as_dir"'
else
test -d ./-p && rmdir ./-p
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -2305,13 +3471,19 @@ as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
-# Save the log message, to keep $[0] and so on meaningful, and to
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by $as_me, which was
-generated by GNU Autoconf 2.61. Invocation command line was
+generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
@@ -2324,59 +3496,74 @@ on `(hostname || uname -n) 2>/dev/null | sed 1q`
_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
# Files that config.status was made for.
config_files="$ac_config_files"
_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
ac_cs_usage="\
-\`$as_me' instantiates files from templates according to the
-current configuration.
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration. Unless the files
+and actions are specified as TAGs, all are instantiated by default.
-Usage: $0 [OPTIONS] [FILE]...
+Usage: $0 [OPTION]... [TAG]...
-h, --help print this help, then exit
-V, --version print version number and configuration settings, then exit
- -q, --quiet do not print progress messages
+ --config print configuration, then exit
+ -q, --quiet, --silent
+ do not print progress messages
-d, --debug don't remove temporary files
--recheck update $as_me by reconfiguring in the same conditions
- --file=FILE[:TEMPLATE]
- instantiate the configuration file FILE
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
Configuration files:
$config_files
-Report bugs to <bug-autoconf@gnu.org>."
+Report bugs to the package provider."
_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
config.status
-configured by $0, generated by GNU Autoconf 2.61,
- with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
+configured by $0, generated by GNU Autoconf 2.69,
+ with options \\"\$ac_cs_config\\"
-Copyright (C) 2006 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."
ac_pwd='$ac_pwd'
srcdir='$srcdir'
+test -n "\$AWK" || AWK=awk
_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
-# If no file are specified by the user, then we need to provide default
-# value. By we need to know if files were specified by the user.
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
ac_need_defaults=:
while test $# != 0
do
case $1 in
- --*=*)
+ --*=?*)
ac_option=`expr "X$1" : 'X\([^=]*\)='`
ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
ac_shift=:
;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
*)
ac_option=$1
ac_optarg=$2
@@ -2389,25 +3576,30 @@ do
-recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
ac_cs_recheck=: ;;
--version | --versio | --versi | --vers | --ver | --ve | --v | -V )
- echo "$ac_cs_version"; exit ;;
+ $as_echo "$ac_cs_version"; exit ;;
+ --config | --confi | --conf | --con | --co | --c )
+ $as_echo "$ac_cs_config"; exit ;;
--debug | --debu | --deb | --de | --d | -d )
debug=: ;;
--file | --fil | --fi | --f )
$ac_shift
- CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
+ esac
+ as_fn_append CONFIG_FILES " '$ac_optarg'"
ac_need_defaults=false;;
--he | --h | --help | --hel | -h )
- echo "$ac_cs_usage"; exit ;;
+ $as_echo "$ac_cs_usage"; exit ;;
-q | -quiet | --quiet | --quie | --qui | --qu | --q \
| -silent | --silent | --silen | --sile | --sil | --si | --s)
ac_cs_silent=: ;;
# This is an error.
- -*) { echo "$as_me: error: unrecognized option: $1
-Try \`$0 --help' for more information." >&2
- { (exit 1); exit 1; }; } ;;
+ -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
- *) ac_config_targets="$ac_config_targets $1"
+ *) as_fn_append ac_config_targets " $1"
ac_need_defaults=false ;;
esac
@@ -2422,30 +3614,32 @@ if $ac_cs_silent; then
fi
_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
if \$ac_cs_recheck; then
- echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
- CONFIG_SHELL=$SHELL
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
export CONFIG_SHELL
- exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ exec "\$@"
fi
_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
exec 5>>config.log
{
echo
sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
## Running $as_me. ##
_ASBOX
- echo "$ac_log"
+ $as_echo "$ac_log"
} >&5
_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# Handling of arguments.
for ac_config_target in $ac_config_targets
@@ -2453,9 +3647,7 @@ do
case $ac_config_target in
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
- *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
-echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
- { (exit 1); exit 1; }; };;
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
done
@@ -2476,157 +3668,194 @@ fi
# after its creation but before its name has been assigned to `$tmp'.
$debug ||
{
- tmp=
+ tmp= ac_tmp=
trap 'exit_status=$?
- { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
' 0
- trap '{ (exit 1); exit 1; }' 1 2 13 15
+ trap 'as_fn_exit 1' 1 2 13 15
}
# Create a (secure) tmp directory for tmp files.
{
tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
- test -n "$tmp" && test -d "$tmp"
+ test -d "$tmp"
} ||
{
tmp=./conf$$-$RANDOM
(umask 077 && mkdir "$tmp")
-} ||
-{
- echo "$me: cannot create a temporary directory in ." >&2
- { (exit 1); exit 1; }
-}
-
-#
-# Set up the sed scripts for CONFIG_FILES section.
-#
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
-# No need to generate the scripts if there are no CONFIG_FILES.
-# This happens for instance when ./config.status config.h
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
if test -n "$CONFIG_FILES"; then
-_ACEOF
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+ eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
ac_delim='%!_!# '
for ac_last_try in false false false false false :; do
- cat >conf$$subs.sed <<_ACEOF
-SHELL!$SHELL$ac_delim
-PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim
-PACKAGE_NAME!$PACKAGE_NAME$ac_delim
-PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim
-PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim
-PACKAGE_STRING!$PACKAGE_STRING$ac_delim
-PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim
-exec_prefix!$exec_prefix$ac_delim
-prefix!$prefix$ac_delim
-program_transform_name!$program_transform_name$ac_delim
-bindir!$bindir$ac_delim
-sbindir!$sbindir$ac_delim
-libexecdir!$libexecdir$ac_delim
-datarootdir!$datarootdir$ac_delim
-datadir!$datadir$ac_delim
-sysconfdir!$sysconfdir$ac_delim
-sharedstatedir!$sharedstatedir$ac_delim
-localstatedir!$localstatedir$ac_delim
-includedir!$includedir$ac_delim
-oldincludedir!$oldincludedir$ac_delim
-docdir!$docdir$ac_delim
-infodir!$infodir$ac_delim
-htmldir!$htmldir$ac_delim
-dvidir!$dvidir$ac_delim
-pdfdir!$pdfdir$ac_delim
-psdir!$psdir$ac_delim
-libdir!$libdir$ac_delim
-localedir!$localedir$ac_delim
-mandir!$mandir$ac_delim
-DEFS!$DEFS$ac_delim
-ECHO_C!$ECHO_C$ac_delim
-ECHO_N!$ECHO_N$ac_delim
-ECHO_T!$ECHO_T$ac_delim
-LIBS!$LIBS$ac_delim
-build_alias!$build_alias$ac_delim
-host_alias!$host_alias$ac_delim
-target_alias!$target_alias$ac_delim
-APXS!$APXS$ac_delim
-PYTHON!$PYTHON$ac_delim
-CPPFLAGS!$CPPFLAGS$ac_delim
-CFLAGS!$CFLAGS$ac_delim
-LDFLAGS!$LDFLAGS$ac_delim
-LDLIBS!$LDLIBS$ac_delim
-LIBEXECDIR!$LIBEXECDIR$ac_delim
-LIBOBJS!$LIBOBJS$ac_delim
-LTLIBOBJS!$LTLIBOBJS$ac_delim
-_ACEOF
+ . ./conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
- if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 46; then
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
break
elif $ac_last_try; then
- { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
-echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
- { (exit 1); exit 1; }; }
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
else
ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
fi
done
+rm -f conf$$subs.sh
-ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed`
-if test -n "$ac_eof"; then
- ac_eof=`echo "$ac_eof" | sort -nru | sed 1q`
- ac_eof=`expr $ac_eof + 1`
-fi
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
-cat >>$CONFIG_STATUS <<_ACEOF
-cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof
-/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end
+_ACAWK
_ACEOF
-sed '
-s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g
-s/^/s,@/; s/!/@,|#_!!_#|/
-:n
-t n
-s/'"$ac_delim"'$/,g/; t
-s/$/\\/; p
-N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n
-' >>$CONFIG_STATUS <conf$$subs.sed
-rm -f conf$$subs.sed
-cat >>$CONFIG_STATUS <<_ACEOF
-:end
-s/|#_!!_#|//g
-CEOF$ac_eof
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
_ACEOF
-
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
# trailing colons and then remove the whole line if VPATH becomes empty
# (actually we leave an empty line to preserve line numbers).
if test "x$srcdir" = x.; then
- ac_vpsub='/^[ ]*VPATH[ ]*=/{
-s/:*\$(srcdir):*/:/
-s/:*\${srcdir}:*/:/
-s/:*@srcdir@:*/:/
-s/^\([^=]*=[ ]*\):*/\1/
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
s/^[^=]*=[ ]*$//
}'
fi
-cat >>$CONFIG_STATUS <<\_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
fi # test -n "$CONFIG_FILES"
-for ac_tag in :F $CONFIG_FILES
+eval set X " :F $CONFIG_FILES "
+shift
+for ac_tag
do
case $ac_tag in
:[FHLC]) ac_mode=$ac_tag; continue;;
esac
case $ac_mode$ac_tag in
:[FHL]*:*);;
- :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5
-echo "$as_me: error: Invalid tag $ac_tag." >&2;}
- { (exit 1); exit 1; }; };;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
:[FH]-) ac_tag=-:-;;
:[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
esac
@@ -2645,7 +3874,7 @@ echo "$as_me: error: Invalid tag $ac_tag." >&2;}
for ac_f
do
case $ac_f in
- -) ac_f="$tmp/stdin";;
+ -) ac_f="$ac_tmp/stdin";;
*) # Look for the file first in the build tree, then in the source tree
# (if the path is not absolute). The absolute path cannot be DOS-style,
# because $ac_f cannot contain `:'.
@@ -2654,26 +3883,34 @@ echo "$as_me: error: Invalid tag $ac_tag." >&2;}
[\\/$]*) false;;
*) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
esac ||
- { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
-echo "$as_me: error: cannot find input file: $ac_f" >&2;}
- { (exit 1); exit 1; }; };;
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
esac
- ac_file_inputs="$ac_file_inputs $ac_f"
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ as_fn_append ac_file_inputs " '$ac_f'"
done
# Let's still pretend it is `configure' which instantiates (i.e., don't
# use $as_me), people would be surprised to read:
# /* config.h. Generated by config.status. */
- configure_input="Generated from "`IFS=:
- echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure."
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
if test x"$ac_file" != x-; then
configure_input="$ac_file. $configure_input"
- { echo "$as_me:$LINENO: creating $ac_file" >&5
-echo "$as_me: creating $ac_file" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
case $ac_tag in
- *:-:* | *:-) cat >"$tmp/stdin";;
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
esac
;;
esac
@@ -2683,42 +3920,7 @@ $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
X"$ac_file" : 'X\(//\)[^/]' \| \
X"$ac_file" : 'X\(//\)$' \| \
X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
-echo X"$ac_file" |
- sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
- s//\1/
- q
- }
- /^X\(\/\/\)[^/].*/{
- s//\1/
- q
- }
- /^X\(\/\/\)$/{
- s//\1/
- q
- }
- /^X\(\/\).*/{
- s//\1/
- q
- }
- s/.*/./; q'`
- { as_dir="$ac_dir"
- case $as_dir in #(
- -*) as_dir=./$as_dir;;
- esac
- test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
- as_dirs=
- while :; do
- case $as_dir in #(
- *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #(
- *) as_qdir=$as_dir;;
- esac
- as_dirs="'$as_qdir' $as_dirs"
- as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
- X"$as_dir" : 'X\(//\)[^/]' \| \
- X"$as_dir" : 'X\(//\)$' \| \
- X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-echo X"$as_dir" |
+$as_echo X"$ac_file" |
sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
s//\1/
q
@@ -2736,20 +3938,15 @@ echo X"$as_dir" |
q
}
s/.*/./; q'`
- test -d "$as_dir" && break
- done
- test -z "$as_dirs" || eval "mkdir $as_dirs"
- } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
-echo "$as_me: error: cannot create directory $as_dir" >&2;}
- { (exit 1); exit 1; }; }; }
+ as_dir="$ac_dir"; as_fn_mkdir_p
ac_builddir=.
case "$ac_dir" in
.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
*)
- ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
# A ".." for each directory in $ac_dir_suffix.
- ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
case $ac_top_builddir_sub in
"") ac_top_builddir_sub=. ac_top_build_prefix= ;;
*) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
@@ -2785,12 +3982,12 @@ ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# If the template does not know about datarootdir, expand it.
# FIXME: This hack should be removed a few years after 2.60.
ac_datarootdir_hack=; ac_datarootdir_seen=
-
-case `sed -n '/datarootdir/ {
+ac_sed_dataroot='
+/datarootdir/ {
p
q
}
@@ -2798,36 +3995,37 @@ case `sed -n '/datarootdir/ {
/@docdir@/p
/@infodir@/p
/@localedir@/p
-/@mandir@/p
-' $ac_file_inputs` in
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
*datarootdir*) ac_datarootdir_seen=yes;;
*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
- { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
-echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_datarootdir_hack='
s&@datadir@&$datadir&g
s&@docdir@&$docdir&g
s&@infodir@&$infodir&g
s&@localedir@&$localedir&g
s&@mandir@&$mandir&g
- s&\\\${datarootdir}&$datarootdir&g' ;;
+ s&\\\${datarootdir}&$datarootdir&g' ;;
esac
_ACEOF
# Neutralize VPATH when `$srcdir' = `.'.
# Shell code in configure.ac might set extrasub.
# FIXME: do we really want to maintain this feature?
-cat >>$CONFIG_STATUS <<_ACEOF
- sed "$ac_vpsub
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
$extrasub
_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
:t
/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
-s&@configure_input@&$configure_input&;t t
+s|@configure_input@|$ac_sed_conf_input|;t t
s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
s&@srcdir@&$ac_srcdir&;t t
s&@abs_srcdir@&$ac_abs_srcdir&;t t
s&@top_srcdir@&$ac_top_srcdir&;t t
@@ -2836,21 +4034,25 @@ s&@builddir@&$ac_builddir&;t t
s&@abs_builddir@&$ac_abs_builddir&;t t
s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
$ac_datarootdir_hack
-" $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
- { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
- { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
- { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined. Please make sure it is defined." >&5
-echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined. Please make sure it is defined." >&2;}
-
- rm -f "$tmp/stdin"
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&2;}
+
+ rm -f "$ac_tmp/stdin"
case $ac_file in
- -) cat "$tmp/out"; rm -f "$tmp/out";;
- *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;;
- esac
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+ esac \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
;;
@@ -2860,11 +4062,13 @@ which seems to be undefined. Please make sure it is defined." >&2;}
done # for ac_tag
-{ (exit 0); exit 0; }
+as_fn_exit 0
_ACEOF
-chmod +x $CONFIG_STATUS
ac_clean_files=$ac_clean_files_save
+test $ac_write_fail = 0 ||
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
# configure is writing to config.log, and then calls config.status.
# config.status does its own redirection, appending to config.log.
@@ -2884,6 +4088,10 @@ if test "$no_create" != yes; then
exec 5>>config.log
# Use ||, not &&, to avoid exiting from the if with $? = 1, which
# would make configure fail if this is the last instruction.
- $ac_cs_success || { (exit 1); exit 1; }
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
fi
diff --git a/configure.ac b/configure.ac
index ef5c394..a81bffa 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,6 +1,6 @@
dnl vim: set sw=4 expandtab :
dnl
-dnl Copyright 2007-2009 GRAHAM DUMPLETON
+dnl Copyright 2007-2014 GRAHAM DUMPLETON
dnl
dnl Licensed under the Apache License, Version 2.0 (the "License");
dnl you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ dnl limitations under the License.
dnl Process this file with autoconf to produce a configure script.
-AC_INIT(mod_wsgi.c)
+AC_INIT(src/server/mod_wsgi.c)
AC_ARG_ENABLE(framework, AC_HELP_STRING([--disable-framework],
[disable mod_wsgi framework link]),
@@ -39,6 +39,8 @@ fi
AC_SUBST(APXS)
+AC_CHECK_FUNCS(prctl)
+
AC_MSG_CHECKING(Apache version)
HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`"
HTTPD_INCLUDEDIR="`${APXS} -q INCLUDEDIR`"
@@ -75,7 +77,15 @@ AC_SUBST(PYTHON)
PYTHON_VERSION=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
- stdout.write((sysconfig.get_config_var("VERSION")))'`
+ stdout.write(sysconfig.get_config_var("VERSION"))'`
+
+PYTHON_LDVERSION=`${PYTHON} -c 'from sys import stdout; \
+ from distutils import sysconfig; \
+ stdout.write(sysconfig.get_config_var("LDVERSION") or "")'`
+
+if test x"${PYTHON_LDVERSION}" = x""; then
+ PYTHON_LDVERSION=${PYTHON_VERSION}
+fi
CPPFLAGS1=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
@@ -113,12 +123,28 @@ PYTHONFRAMEWORK=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORK"))'`
+if test "${PYTHON_LDVERSION}" != "${PYTHON_VERSION}"; then
+ PYTHONCFGDIR="${PYTHONCFGDIR}-${PYTHON_LDVERSION}"
+fi
+
if test "${PYTHONFRAMEWORKDIR}" = "no-framework" -o \
"${ENABLE_FRAMEWORK}" != "yes"; then
LDFLAGS1="-L${PYTHONLIBDIR}"
LDFLAGS2="-L${PYTHONCFGDIR}"
- LDLIBS1="-lpython${PYTHON_VERSION}"
+ LDLIBS1="-lpython${PYTHON_LDVERSION}"
+
+    # Mac OS X appears to be broken and doesn't use the ABIFLAGS
+    # suffix, so add a check to try and work out what we need to do.
+
+ if test -f "${PYTHONLIBDIR}/libpython${PYTHON_VERSION}.a"; then
+ LDLIBS1="-lpython${PYTHON_VERSION}"
+ fi
+
+ if test -f "${PYTHONCFGDIR}/libpython${PYTHON_VERSION}.a"; then
+ LDLIBS1="-lpython${PYTHON_VERSION}"
+ fi
+
LDLIBS2=`${PYTHON} -c 'from sys import stdout; \
from distutils import sysconfig; \
stdout.write(sysconfig.get_config_var("LIBS"))'`
@@ -169,7 +195,4 @@ AC_SUBST(LIBEXECDIR)
HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'`
-rm -f Makefile.in
-ln -s posix-ap${HTTPD_MAJOR_VERSION}X.mk.in Makefile.in
-
AC_OUTPUT(Makefile)
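
The PYTHON_LDVERSION logic added above mirrors what CPython itself reports: on Python 3.2+ the LDVERSION config variable carries the ABI flags suffix (for example "3.3m"), while older versions only define VERSION, so configure falls back to it. A minimal standalone sketch of the same probe, assuming only that distutils is available, is:

# Sketch only: reproduces the configure.ac probe above outside of autoconf.
# Assumes distutils is available (true for the Python versions targeted here).
from distutils import sysconfig

version = sysconfig.get_config_var("VERSION")
# LDVERSION is empty/None on older Pythons, so fall back to VERSION,
# just as the shell test above does.
ldversion = sysconfig.get_config_var("LDVERSION") or version

print(version, ldversion)
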
diff --git a/posix-ap1X.mk.in b/posix-ap1X.mk.in
deleted file mode 100644
index a003a9e..0000000
--- a/posix-ap1X.mk.in
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2007 GRAHAM DUMPLETON
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-APXS = @APXS@
-PYTHON = @PYTHON@
-
-DESTDIR =
-LIBEXECDIR = @LIBEXECDIR@
-
-CPPFLAGS = @CPPFLAGS@
-CFLAGS = @CFLAGS@
-LDFLAGS = @LDFLAGS@
-LDLIBS = @LDLIBS@
-
-all : mod_wsgi.so
-
-mod_wsgi.so : mod_wsgi.c
- $(APXS) -c $(CPPFLAGS) $(CFLAGS) mod_wsgi.c $(LDFLAGS) $(LDLIBS)
-
-$(DESTDIR)$(LIBEXECDIR) :
- mkdir -p $@
-
-install : all $(DESTDIR)$(LIBEXECDIR)
- $(APXS) -i -S LIBEXECDIR=$(DESTDIR)$(LIBEXECDIR) -n 'mod_wsgi' mod_wsgi.so
-
-clean :
- -rm -f mod_wsgi.o mod_wsgi.so
- -rm -f config.log config.status
- -rm -rf autom4te.cache
-
-distclean : clean
- -rm -f Makefile Makefile.in
-
-realclean : distclean
- -rm -f configure
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..7e231ad
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,143 @@
+from __future__ import print_function
+
+import os
+import sys
+import fnmatch
+import subprocess
+
+from setuptools import setup
+from distutils.core import Extension
+from distutils.sysconfig import get_config_var as get_python_config
+from distutils.sysconfig import get_python_lib
+
+# Compile all available source files.
+
+source_files = [os.path.join('src/server', name) for name in
+ os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ 'src/server')) if fnmatch.fnmatch(name, '*.c')]
+
+# Work out all the Apache-specific compilation flags.
+
+def find_program(names, default=None, paths=[]):
+ for name in names:
+ for path in os.environ['PATH'].split(':') + paths:
+ program = os.path.join(path, name)
+ if os.path.exists(program):
+ return program
+ return default
+
+APXS = os.environ.get('APXS')
+
+if APXS is None:
+ APXS = find_program(['apxs2', 'apxs'], 'apxs', ['/usr/sbin', os.getcwd()])
+elif not os.path.isabs(APXS):
+ APXS = find_program([APXS], APXS, ['/usr/sbin', os.getcwd()])
+
+if not os.path.isabs(APXS) or not os.access(APXS, os.X_OK):
+ raise RuntimeError('The %r command appears not to be installed or is '
+ 'not executable. Please check the list of prerequisites in the '
+ 'documentation for this package and install any missing '
+ 'Apache httpd server packages.' % APXS)
+
+def get_apxs_config(query):
+ p = subprocess.Popen([APXS, '-q', query],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ if isinstance(out, bytes):
+ out = out.decode('UTF-8')
+ return out.strip()
+
+INCLUDEDIR = get_apxs_config('INCLUDEDIR')
+CPPFLAGS = get_apxs_config('CPPFLAGS').split()
+CFLAGS = get_apxs_config('CFLAGS').split()
+
+EXTRA_INCLUDES = get_apxs_config('EXTRA_INCLUDES').split()
+EXTRA_CPPFLAGS = get_apxs_config('EXTRA_CPPFLAGS').split()
+EXTRA_CFLAGS = get_apxs_config('EXTRA_CFLAGS').split()
+
+# Write out apxs_config.py, which caches various configuration
+# settings related to Apache.
+
+BINDIR = get_apxs_config('BINDIR')
+SBINDIR = get_apxs_config('SBINDIR')
+
+PROGNAME = get_apxs_config('PROGNAME')
+
+MPM_NAME = get_apxs_config('MPM_NAME')
+LIBEXECDIR = get_apxs_config('LIBEXECDIR')
+SHLIBPATH_VAR = get_apxs_config('SHLIBPATH_VAR')
+
+if os.path.exists(os.path.join(SBINDIR, PROGNAME)):
+ HTTPD = os.path.join(SBINDIR, PROGNAME)
+elif os.path.exists(os.path.join(BINDIR, PROGNAME)):
+ HTTPD = os.path.join(BINDIR, PROGNAME)
+else:
+ HTTPD = PROGNAME
+
+with open(os.path.join(os.path.dirname(__file__),
+ 'src/server/apxs_config.py'), 'w') as fp:
+ print('HTTPD = "%s"' % HTTPD, file=fp)
+ print('BINDIR = "%s"' % BINDIR, file=fp)
+ print('SBINDIR = "%s"' % SBINDIR, file=fp)
+ print('PROGNAME = "%s"' % PROGNAME, file=fp)
+ print('MPM_NAME = "%s"' % MPM_NAME, file=fp)
+ print('LIBEXECDIR = "%s"' % LIBEXECDIR, file=fp)
+ print('SHLIBPATH_VAR = "%s"' % SHLIBPATH_VAR, file=fp)
+
+# Work out location of Python library and how to link it.
+
+PYTHON_VERSION = get_python_config('VERSION')
+PYTHON_LDVERSION = get_python_config('LDVERSION') or ''
+
+PYTHON_LIBDIR = get_python_config('LIBDIR')
+PYTHON_CFGDIR = get_python_lib(plat_specific=1, standard_lib=1) + '/config'
+
+if PYTHON_LDVERSION and PYTHON_LDVERSION != PYTHON_VERSION:
+ PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, PYTHON_LDVERSION)
+
+PYTHON_LDFLAGS = ['-L%s' % PYTHON_LIBDIR, '-L%s' % PYTHON_CFGDIR]
+PYTHON_LDLIBS = ['-lpython%s' % PYTHON_LDVERSION]
+
+if os.path.exists(os.path.join(PYTHON_LIBDIR,
+ 'libpython%s.a' % PYTHON_VERSION)):
+ PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
+
+if os.path.exists(os.path.join(PYTHON_CFGDIR,
+ 'libpython%s.a' % PYTHON_VERSION)):
+ PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
+
+# Create the final set of compilation flags to be used.
+
+INCLUDE_DIRS = [INCLUDEDIR]
+EXTRA_COMPILE_FLAGS = (EXTRA_INCLUDES + CPPFLAGS + EXTRA_CPPFLAGS +
+ CFLAGS + EXTRA_CFLAGS)
+EXTRA_LINK_ARGS = PYTHON_LDFLAGS + PYTHON_LDLIBS
+
+# Force adding of LD_RUN_PATH for platforms that may need it.
+
+LD_RUN_PATH = os.environ.get('LD_RUN_PATH', '')
+LD_RUN_PATH += ':%s:%s' % (PYTHON_LIBDIR, PYTHON_CFGDIR)
+LD_RUN_PATH = LD_RUN_PATH.lstrip(':')
+
+os.environ['LD_RUN_PATH'] = LD_RUN_PATH
+
+# Now add the definitions to build everything.
+
+extension_name = 'mod_wsgi.server.mod_wsgi-py%s%s' % sys.version_info[:2]
+
+extension = Extension(extension_name, source_files,
+ include_dirs=INCLUDE_DIRS, extra_compile_args=EXTRA_COMPILE_FLAGS,
+ extra_link_args=EXTRA_LINK_ARGS)
+
+setup(name = 'mod_wsgi',
+ version = '5.0.0-beta',
+ description = 'Installer for Apache/mod_wsgi.',
+ author = 'Graham Dumpleton',
+ author_email = 'Graham.Dumpleton@gmail.com',
+ license = 'Apache',
+ packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management',
+ 'mod_wsgi.server.management.commands'],
+ package_dir = {'mod_wsgi': 'src'},
+ ext_modules = [extension],
+ entry_points = { 'console_scripts': ['wsgi-admin = mod_wsgi.server:main'],},
+)
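
For reference, the get_apxs_config() helper above is a thin wrapper around `apxs -q`; a standalone sketch of the same query pattern, assuming an apxs (or apxs2) binary is on PATH as the find_program() search expects, could look like the following. It uses subprocess.check_output for brevity, whereas setup.py itself sticks with Popen for broader Python compatibility.

# Sketch only: not part of the commit. Shows the `apxs -q` query pattern
# that setup.py uses to discover Apache build settings.
import subprocess

def apxs_query(variable, apxs='apxs'):
    # apxs prints the requested build variable, e.g. LIBEXECDIR or CFLAGS.
    output = subprocess.check_output([apxs, '-q', variable])
    return output.decode('UTF-8').strip()

if __name__ == '__main__':
    print(apxs_query('LIBEXECDIR'))  # directory where LoadModule expects modules
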
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..bb61062
--- /dev/null
+++ b/src/__init__.py
@@ -0,0 +1,2 @@
+import pkgutil
+__path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/src/server/__init__.py b/src/server/__init__.py
new file mode 100644
index 0000000..c8bda7d
--- /dev/null
+++ b/src/server/__init__.py
@@ -0,0 +1,926 @@
+from __future__ import print_function, division
+
+import os
+import sys
+import shutil
+import subprocess
+import optparse
+import math
+import signal
+import threading
+import atexit
+import imp
+import pwd
+import grp
+
+try:
+ import Queue as queue
+except ImportError:
+ import queue
+
+from . import apxs_config
+
+python_version = '%s%s' % sys.version_info[:2]
+
+try:
+ import sysconfig
+ MOD_WSGI_SO = 'mod_wsgi-py%s%s' % (python_version,
+ sysconfig.get_config_var('SO'))
+ MOD_WSGI_SO = os.path.join(os.path.dirname(__file__), MOD_WSGI_SO)
+
+ if not os.path.exists(MOD_WSGI_SO):
+ MOD_WSGI_SO = 'mod_wsgi-py%s.so' % python_version
+ MOD_WSGI_SO = os.path.join(os.path.dirname(__file__), MOD_WSGI_SO)
+
+except ImportError:
+ MOD_WSGI_SO = 'mod_wsgi-py%s.so' % python_version
+ MOD_WSGI_SO = os.path.join(os.path.dirname(__file__), MOD_WSGI_SO)
+
+def where():
+ return MOD_WSGI_SO
+
+def default_run_user():
+ return pwd.getpwuid(os.getuid()).pw_name
+
+def default_run_group():
+ return grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name
+
+def find_program(names, default=None, paths=[]):
+ for name in names:
+ for path in os.environ['PATH'].split(':') + paths:
+ program = os.path.join(path, name)
+ if os.path.exists(program):
+ return program
+ return default
+
+def find_mimetypes():
+ import mimetypes
+ for name in mimetypes.knownfiles:
+        # Use the first of the known candidate files which actually exists.
+        if os.path.exists(name):
+            return name
+ else:
+ return name
+
+APACHE_GENERAL_CONFIG = """
+LoadModule version_module '%(modules_directory)s/mod_version.so'
+
+ServerName %(host)s
+ServerRoot '%(server_root)s'
+PidFile '%(pid_file)s'
+
+ServerSignature Off
+
+User ${WSGI_RUN_USER}
+Group ${WSGI_RUN_GROUP}
+
+Listen %(host)s:%(port)s
+
+<IfVersion < 2.4>
+LockFile '%(server_root)s/accept.lock'
+</IfVersion>
+
+<IfVersion >= 2.4>
+LoadModule mpm_event_module '%(modules_directory)s/mod_mpm_event.so'
+LoadModule access_compat_module '%(modules_directory)s/mod_access_compat.so'
+LoadModule unixd_module '%(modules_directory)s/mod_unixd.so'
+LoadModule authn_core_module '%(modules_directory)s/mod_authn_core.so'
+LoadModule authz_core_module '%(modules_directory)s/mod_authz_core.so'
+</IfVersion>
+
+LoadModule authz_host_module '%(modules_directory)s/mod_authz_host.so'
+LoadModule mime_module '%(modules_directory)s/mod_mime.so'
+LoadModule rewrite_module '%(modules_directory)s/mod_rewrite.so'
+LoadModule alias_module '%(modules_directory)s/mod_alias.so'
+LoadModule wsgi_module '%(mod_wsgi_so)s'
+
+<IfDefine WSGI_SERVER_STATUS>
+LoadModule status_module '%(modules_directory)s/mod_status.so'
+</IfDefine>
+
+<IfVersion < 2.4>
+DefaultType text/plain
+</IfVersion>
+
+TypesConfig '%(mime_types)s'
+
+HostnameLookups Off
+MaxMemFree 64
+Timeout 60
+
+LimitRequestBody %(limit_request_body)s
+
+<Directory />
+ AllowOverride None
+ Order deny,allow
+ Deny from all
+</Directory>
+
+WSGIPythonHome '%(python_home)s'
+WSGIRestrictEmbedded On
+WSGISocketPrefix %(server_root)s/wsgi
+<IfDefine WSGI_MULTIPROCESS>
+WSGIDaemonProcess %(host)s:%(port)s display-name='%(process_name)s' \\
+ home='%(working_directory)s' processes=%(processes)s threads=%(threads)s \\
+ maximum-requests=%(maximum_requests)s python-eggs='%(python_eggs)s' \\
+ lang='%(lang)s' locale='%(locale)s'
+</IfDefine>
+<IfDefine !WSGI_MULTIPROCESS>
+WSGIDaemonProcess %(host)s:%(port)s display-name='%(process_name)s' \\
+ home='%(working_directory)s' threads=%(threads)s \\
+ maximum-requests=%(maximum_requests)s python-eggs='%(python_eggs)s' \\
+ lang='%(lang)s' locale='%(locale)s'
+</IfDefine>
+WSGICallableObject '%(callable_object)s'
+WSGIPassAuthorization On
+
+<IfDefine WSGI_SERVER_STATUS>
+<Location /server-status>
+ SetHandler server-status
+ Order deny,allow
+ Deny from all
+ Allow from localhost
+</Location>
+</IfDefine>
+
+<IfDefine WSGI_KEEP_ALIVE>
+KeepAlive On
+KeepAliveTimeout %(keep_alive_timeout)s
+</IfDefine>
+<IfDefine !WSGI_KEEP_ALIVE>
+KeepAlive Off
+</IfDefine>
+
+ErrorLog '%(error_log)s'
+LogLevel %(log_level)s
+
+<IfDefine WSGI_ACCESS_LOG>
+LoadModule log_config_module %(modules_directory)s/mod_log_config.so
+LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b" common
+CustomLog "%(log_directory)s/access_log" common
+</IfDefine>
+
+<IfModule mpm_prefork_module>
+ServerLimit %(prefork_server_limit)s
+StartServers %(prefork_start_servers)s
+MaxClients %(prefork_max_clients)s
+MinSpareServers %(prefork_min_spare_servers)s
+MaxSpareServers %(prefork_max_spare_servers)s
+MaxRequestsPerChild 0
+</IfModule>
+
+<IfModule mpm_worker_module>
+ServerLimit %(worker_server_limit)s
+ThreadLimit %(worker_thread_limit)s
+StartServers %(worker_start_servers)s
+MaxClients %(worker_max_clients)s
+MinSpareThreads %(worker_min_spare_threads)s
+MaxSpareThreads %(worker_max_spare_threads)s
+ThreadsPerChild %(worker_threads_per_child)s
+MaxRequestsPerChild 0
+ThreadStackSize 262144
+</IfModule>
+
+<IfModule mpm_event_module>
+ServerLimit %(worker_server_limit)s
+ThreadLimit %(worker_thread_limit)s
+StartServers %(worker_start_servers)s
+MaxClients %(worker_max_clients)s
+MinSpareThreads %(worker_min_spare_threads)s
+MaxSpareThreads %(worker_max_spare_threads)s
+ThreadsPerChild %(worker_threads_per_child)s
+MaxRequestsPerChild 0
+ThreadStackSize 262144
+</IfModule>
+
+DocumentRoot '%(document_root)s'
+
+<Directory '%(server_root)s'>
+<Files handler.wsgi>
+ Order allow,deny
+ Allow from all
+</Files>
+</Directory>
+
+<Directory '%(document_root)s'>
+ RewriteEngine On
+ RewriteCond %%{REQUEST_FILENAME} !-f
+<IfDefine WSGI_SERVER_STATUS>
+ RewriteCond %%{REQUEST_URI} !/server-status
+</IfDefine>
+ RewriteRule .* - [H=wsgi-handler]
+ Order allow,deny
+ Allow from all
+</Directory>
+
+WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\
+ process-group='%(host)s:%(port)s' application-group=%%{GLOBAL}
+WSGIImportScript '%(server_root)s/handler.wsgi' \\
+ process-group='%(host)s:%(port)s' application-group=%%{GLOBAL}
+"""
+
+APACHE_ALIAS_DIRECTORY_CONFIG = """
+Alias '%(mount_point)s' '%(directory)s'
+
+<Directory '%(directory)s'>
+ Order allow,deny
+ Allow from all
+</Directory>
+"""
+
+APACHE_ALIAS_FILENAME_CONFIG = """
+Alias '%(mount_point)s' '%(directory)s/%(filename)s'
+
+<Directory '%(directory)s'>
+<Files '%(filename)s'>
+ Order allow,deny
+ Allow from all
+</Files>
+</Directory>
+"""
+
+APACHE_ERROR_DOCUMENT_CONFIG = """
+ErrorDocument '%(status)s' '%(document)s'
+"""
+
+APACHE_INCLUDE_CONFIG = """
+Include '%(filename)s'
+"""
+
+APACHE_WDB_CONFIG = """
+WSGIDaemonProcess wdb-server display-name=%%{GROUP} threads=1
+WSGIImportScript '%(server_root)s/wdb-server.py' \\
+ process-group=wdb-server application-group=%%{GLOBAL}
+"""
+
+def generate_apache_config(options):
+ with open(options['httpd_conf'], 'w') as fp:
+ print(APACHE_GENERAL_CONFIG % options, file=fp)
+
+ if options['url_aliases']:
+ for mount_point, target in sorted(options['url_aliases'],
+ reverse=True):
+ target = os.path.abspath(target)
+
+ if os.path.isdir(target):
+ directory = target
+
+ print(APACHE_ALIAS_DIRECTORY_CONFIG % dict(
+ mount_point=mount_point, directory=directory),
+ file=fp)
+
+ else:
+ directory = os.path.dirname(target)
+ filename = os.path.basename(target)
+
+ print(APACHE_ALIAS_FILENAME_CONFIG % dict(
+ mount_point=mount_point, directory=directory,
+ filename=filename), file=fp)
+
+ if options['error_documents']:
+ for status, document in options['error_documents']:
+ print(APACHE_ERROR_DOCUMENT_CONFIG % dict(status=status,
+ document=document.replace("'", "\\'")), file=fp)
+
+ if options['include_files']:
+ for filename in options['include_files']:
+ filename = os.path.abspath(filename)
+ print(APACHE_INCLUDE_CONFIG % dict(filename=filename),
+ file=fp)
+
+ if options['with_wdb']:
+ print(APACHE_WDB_CONFIG % options, file=fp)
+
+_interval = 1.0
+_times = {}
+_files = []
+
+_running = False
+_queue = queue.Queue()
+_lock = threading.Lock()
+
+def _restart(path):
+ _queue.put(True)
+ prefix = 'monitor (pid=%d):' % os.getpid()
+ print('%s Change detected to "%s".' % (prefix, path), file=sys.stderr)
+ print('%s Triggering process restart.' % prefix, file=sys.stderr)
+ os.kill(os.getpid(), signal.SIGINT)
+
+def _modified(path):
+ try:
+        # If the path doesn't denote a file and we were previously
+        # tracking it, then it has been removed or the file type has
+        # changed, so force a restart. If we were not previously
+        # tracking the file then we can ignore it, as it is probably
+        # a pseudo reference such as a file extracted from a
+        # collection of modules contained in a zip file.
+
+ if not os.path.isfile(path):
+ return path in _times
+
+        # Check when the file was last modified.
+
+ mtime = os.stat(path).st_mtime
+ if path not in _times:
+ _times[path] = mtime
+
+        # Force a restart when the modification time has changed, even
+        # if the time is now older, as that could indicate that an
+        # older file has been restored.
+
+ if mtime != _times[path]:
+ return True
+ except:
+        # If any exception occurred, it is likely that the file has
+        # been removed just before the stat(), so force a restart.
+
+ return True
+
+ return False
+
+def _monitor():
+ global _files
+
+ while 1:
+ # Check modification times on all files in sys.modules.
+
+ for module in list(sys.modules.values()):
+ if not hasattr(module, '__file__'):
+ continue
+ path = getattr(module, '__file__')
+ if not path:
+ continue
+ if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
+ path = path[:-1]
+ if _modified(path):
+ return _restart(path)
+
+ # Check modification times on files which have
+ # specifically been registered for monitoring.
+
+ for path in _files:
+ if _modified(path):
+ return _restart(path)
+
+ # Go to sleep for specified interval.
+
+ try:
+ return _queue.get(timeout=_interval)
+ except:
+ pass
+
+_thread = threading.Thread(target=_monitor)
+_thread.setDaemon(True)
+
+def _exiting():
+ try:
+ _queue.put(True)
+ except:
+ pass
+ _thread.join()
+
+def track_changes(path):
+ if not path in _files:
+ _files.append(path)
+
+def start_reloader(interval=1.0):
+ global _interval
+ if interval < _interval:
+ _interval = interval
+
+ global _running
+ _lock.acquire()
+ if not _running:
+ prefix = 'monitor (pid=%d):' % os.getpid()
+ print('%s Starting change monitor.' % prefix, file=sys.stderr)
+ _running = True
+ _thread.start()
+ atexit.register(_exiting)
+ _lock.release()
+
+class ApplicationHandler(object):
+
+ def __init__(self, script, callable_object='application',
+ with_newrelic=False, with_wdb=False):
+ self.script = script
+ self.callable_object = callable_object
+
+ self.module = imp.new_module('__wsgi__')
+ self.module.__file__ = script
+
+ with open(script, 'r') as fp:
+ code = compile(fp.read(), script, 'exec', dont_inherit=True)
+ exec(code, self.module.__dict__)
+
+ self.application = getattr(self.module, callable_object)
+
+ sys.modules['__wsgi__'] = self.module
+
+ try:
+ self.mtime = os.path.getmtime(script)
+ except:
+ self.mtime = None
+
+ if with_newrelic:
+ self.setup_newrelic()
+
+ if with_wdb:
+ self.setup_wdb()
+
+ def setup_newrelic(self):
+ import newrelic.agent
+
+ config_file = os.environ.get('NEW_RELIC_CONFIG_FILE')
+ environment = os.environ.get('NEW_RELIC_ENVIRONMENT')
+
+ global_settings = newrelic.agent.global_settings()
+ if global_settings.log_file is None:
+ global_settings.log_file = 'stderr'
+
+ newrelic.agent.initialize(config_file, environment)
+ newrelic.agent.register_application()
+
+ self.application = newrelic.agent.WSGIApplicationWrapper(
+ self.application)
+
+ def setup_wdb(self):
+ from wdb.ext import WdbMiddleware
+ self.application = WdbMiddleware(self.application)
+
+ def reload_required(self, environ):
+ try:
+ mtime = os.path.getmtime(self.script)
+ except:
+ mtime = None
+
+ return mtime != self.mtime
+
+ def handle_request(self, environ, start_response):
+        # Strip out the leading component due to the internal redirect
+        # in Apache when using the web application as a fallback resource.
+
+ script_name = environ.get('SCRIPT_NAME')
+ path_info = environ.get('PATH_INFO')
+
+ environ['SCRIPT_NAME'] = ''
+ environ['PATH_INFO'] = script_name + path_info
+
+ return self.application(environ, start_response)
+
+ def __call__(self, environ, start_response):
+ return self.handle_request(environ, start_response)
+
+WSGI_HANDLER_SCRIPT = """
+import mod_wsgi.server
+
+script = '%(script)s'
+callable_object = '%(callable_object)s'
+with_newrelic = %(with_newrelic)s
+with_wdb = %(with_wdb)s
+
+handler = mod_wsgi.server.ApplicationHandler(script, callable_object,
+ with_newrelic=with_newrelic, with_wdb=with_wdb)
+
+reload_required = handler.reload_required
+handle_request = handler.handle_request
+
+if %(reload_on_changes)s:
+ mod_wsgi.server.start_reloader()
+"""
+
+WSGI_DEFAULT_SCRIPT = """
+def application(environ, start_response):
+ status = '200 OK'
+ output = b'Hello World!'
+
+ response_headers = [('Content-type', 'text/plain'),
+ ('Content-Length', str(len(output)))]
+ start_response(status, response_headers)
+
+ return [output]
+"""
+
+def generate_wsgi_handler_script(options):
+ path = os.path.join(options['server_root'], 'handler.wsgi')
+ with open(path, 'w') as fp:
+ print(WSGI_HANDLER_SCRIPT % options, file=fp)
+
+ path = os.path.join(options['server_root'], 'default.wsgi')
+ if not os.path.exists(path):
+ with open(path, 'w') as fp:
+ print(WSGI_DEFAULT_SCRIPT % options, file=fp)
+
+WDB_SERVER_SCRIPT = """
+from wdb_server import server
+from tornado.ioloop import IOLoop
+from tornado.options import options
+from wdb_server.sockets import handle_connection
+from tornado.netutil import bind_sockets, add_accept_handler
+from threading import Thread
+
+def run_server():
+ ioloop = IOLoop.instance()
+ sockets = bind_sockets(options.socket_port)
+ for socket in sockets:
+ add_accept_handler(socket, handle_connection, ioloop)
+ server.listen(options.server_port)
+ ioloop.start()
+
+thread = Thread(target=run_server)
+thread.setDaemon(True)
+thread.start()
+"""
+
+def generate_wdb_server_script(options):
+ path = os.path.join(options['server_root'], 'wdb-server.py')
+ with open(path, 'w') as fp:
+ print(WDB_SERVER_SCRIPT, file=fp)
+
+WSGI_CONTROL_SCRIPT = """
+#!/bin/sh
+
+HTTPD="%(httpd_executable)s %(httpd_arguments)s"
+
+WSGI_RUN_USER="${WSGI_RUN_USER:-%(user)s}"
+WSGI_RUN_GROUP="${WSGI_RUN_GROUP:-%(group)s}"
+
+export WSGI_RUN_USER
+export WSGI_RUN_GROUP
+
+ACMD="$1"
+ARGV="$@"
+
+if test -f %(server_root)s/envvars; then
+ . %(server_root)s/envvars
+fi
+
+STATUSURL="http://%(host)s:%(port)s/server-status"
+
+if [ "x$ARGV" = "x" ] ; then
+ ARGV="-h"
+fi
+
+case $ACMD in
+start|stop|restart|graceful|graceful-stop)
+ exec $HTTPD -k $ARGV
+ ;;
+configtest)
+ exec $HTTPD -t
+ ;;
+status)
+ exec %(python_executable)s -m webbrowser -t $STATUSURL
+ ;;
+*)
+ exec $HTTPD $ARGV
+esac
+"""
+
+APACHE_ENVVARS_FILE = """
+. %(envvars_script)s
+"""
+
+def generate_control_scripts(options):
+ path = os.path.join(options['server_root'], 'wsgi-server')
+ with open(path, 'w') as fp:
+ print(WSGI_CONTROL_SCRIPT.lstrip() % options, file=fp)
+
+ os.chmod(path, 0o755)
+
+ path = os.path.join(options['server_root'], 'envvars')
+ with open(path, 'w') as fp:
+ if options['envvars_script']:
+ print(APACHE_ENVVARS_FILE.lstrip() % options, file=fp)
+
+option_list = (
+ optparse.make_option('--host', default='localhost', metavar='IP-ADDRESS'),
+ optparse.make_option('--port', default=8000, type='int', metavar='NUMBER'),
+
+ optparse.make_option('--processes', type='int', metavar='NUMBER'),
+ optparse.make_option('--threads', type='int', default=5, metavar='NUMBER'),
+
+ optparse.make_option('--callable-object', default='application',
+ metavar='NAME'),
+
+ optparse.make_option('--limit-request-body', type='int', default=10485760,
+ metavar='NUMBER'),
+ optparse.make_option('--maximum-requests', type='int', default=0,
+ metavar='NUMBER'),
+ optparse.make_option('--reload-on-changes', action='store_true',
+ default=False),
+
+ optparse.make_option('--user', default=default_run_user(), metavar='NAME'),
+ optparse.make_option('--group', default=default_run_group(), metavar='NAME'),
+
+ optparse.make_option('--document-root', metavar='DIRECTORY-PATH'),
+
+ optparse.make_option('--url-alias', action='append', nargs=2,
+ dest='url_aliases', metavar='URL-PATH FILE-PATH|DIRECTORY-PATH'),
+ optparse.make_option('--error-document', action='append', nargs=2,
+ dest='error_documents', metavar='STATUS URL-PATH'),
+
+ optparse.make_option('--keep-alive-timeout', type='int', default=0,
+ metavar='SECONDS'),
+
+ optparse.make_option('--server-status', action='store_true', default=False),
+ optparse.make_option('--include-file', action='append',
+ dest='include_files', metavar='FILE-PATH'),
+
+ optparse.make_option('--envvars-script', metavar='FILE-PATH'),
+ optparse.make_option('--lang', default='en_US.UTF-8', metavar='NAME'),
+ optparse.make_option('--locale', default='en_US.UTF-8', metavar='NAME'),
+
+ optparse.make_option('--working-directory', metavar='DIRECTORY-PATH'),
+
+ # XXX What is the --daemonize option for?
+ optparse.make_option('--daemonize', action='store_true', default=False),
+ optparse.make_option('--pid-file', metavar='FILE-PATH'),
+
+ optparse.make_option('--server-root', metavar='DIRECTORY-PATH'),
+ optparse.make_option('--log-directory', metavar='DIRECTORY-PATH'),
+ optparse.make_option('--log-level', default='info', metavar='NAME'),
+ optparse.make_option('--access-log', action='store_true', default=False),
+ optparse.make_option('--startup-log', action='store_true', default=False),
+
+ optparse.make_option('--python-eggs', metavar='DIRECTORY-PATH'),
+
+ optparse.make_option('--httpd-executable', default=apxs_config.HTTPD,
+ metavar='FILE-PATH'),
+ optparse.make_option('--modules-directory', default=apxs_config.LIBEXECDIR,
+ metavar='DIRECTORY-PATH'),
+ optparse.make_option('--mime-types', default=find_mimetypes(),
+ metavar='FILE-PATH'),
+
+ optparse.make_option('--with-newrelic', action='store_true', default=False),
+ optparse.make_option('--with-wdb', action='store_true', default=False),
+)
+
+def cmd_setup_server(params, usage=None):
+ formatter = optparse.IndentedHelpFormatter()
+ formatter.set_long_opt_delimiter(' ')
+
+ usage = usage or '%prog setup-server script [options]'
+ parser = optparse.OptionParser(usage=usage, option_list=option_list,
+ formatter=formatter)
+
+ (options, args) = parser.parse_args(params)
+
+ return _cmd_setup_server(args, vars(options))
+
+def _cmd_setup_server(args, options):
+ options['mod_wsgi_so'] = where()
+
+ options['working_directory'] = options['working_directory'] or os.getcwd()
+
+ options['process_name'] = '(wsgi:%s:%s:%s)' % (options['host'],
+ options['port'], os.getuid())
+
+ if not options['server_root']:
+ options['server_root'] = '/tmp/apache-%s:%s:%s' % (options['host'],
+ options['port'], os.getuid())
+
+ try:
+ os.mkdir(options['server_root'])
+ except Exception:
+ pass
+
+ if not args:
+ options['script'] = os.path.join(options['server_root'],
+ 'default.wsgi')
+ else:
+ options['script'] = os.path.abspath(args[0])
+
+ options['script_directory'] = os.path.dirname(options['script'])
+ options['script_filename'] = os.path.basename(options['script'])
+
+ if not os.path.isabs(options['server_root']):
+ options['server_root'] = os.path.abspath(options['server_root'])
+
+ if not options['document_root']:
+ options['document_root'] = os.path.join(options['server_root'],
+ 'htdocs')
+
+ try:
+ os.mkdir(options['document_root'])
+ except Exception:
+ pass
+
+ if not os.path.isabs(options['document_root']):
+ options['document_root'] = os.path.abspath(options['document_root'])
+
+ if not options['log_directory']:
+ options['log_directory'] = options['server_root']
+
+ try:
+ os.mkdir(options['log_directory'])
+ except Exception:
+ pass
+
+ if not os.path.isabs(options['log_directory']):
+ options['log_directory'] = os.path.abspath(options['log_directory'])
+
+ options['error_log'] = os.path.join(options['log_directory'], 'error_log')
+
+ options['pid_file'] = ((options['pid_file'] and os.path.abspath(
+ options['pid_file'])) or os.path.join(options['server_root'],
+ 'httpd.pid'))
+
+ options['python_eggs'] = (os.path.abspath(options['python_eggs']) if
+ options['python_eggs'] is not None else None)
+
+ if options['python_eggs'] is None:
+ options['python_eggs'] = os.path.join(options['server_root'],
+ 'python-eggs')
+
+ try:
+ os.mkdir(options['python_eggs'])
+ except Exception:
+ pass
+
+ options['multiprocess'] = options['processes'] is not None
+ options['processes'] = options['processes'] or 1
+
+ options['python_home'] = sys.prefix
+
+ options['keep_alive'] = options['keep_alive_timeout'] != 0
+
+ generate_wsgi_handler_script(options)
+
+ if options['with_wdb']:
+ generate_wdb_server_script(options)
+
+ options['prefork_max_clients'] = int(1.25 * options['processes'] *
+ options['threads'])
+ options['prefork_server_limit'] = options['prefork_max_clients']
+ options['prefork_start_servers'] = max(1, int(0.1 *
+ options['prefork_max_clients']))
+ options['prefork_min_spare_servers'] = options['prefork_start_servers']
+ options['prefork_max_spare_servers'] = max(1, int(0.4 *
+ options['prefork_max_clients']))
+
+ options['worker_max_clients'] = int(1.25*options['processes']*
+ options['threads'])
+ options['worker_threads_per_child'] = int(min(
+ options['worker_max_clients'], 25))
+ options['worker_thread_limit'] = options['worker_threads_per_child']
+
+ count = options['worker_max_clients']/options['worker_threads_per_child']
+ options['worker_server_limit'] = int(math.floor(count))
+ if options['worker_server_limit'] != count:
+ options['worker_server_limit'] += 1
+
+ options['worker_max_clients'] = (options['worker_server_limit'] *
+ options['worker_threads_per_child'])
+
+ options['worker_start_servers'] = max(1, int(0.1 *
+ options['worker_server_limit']))
+ options['worker_min_spare_threads'] = max(
+ options['worker_threads_per_child'],
+ int(0.2 * options['worker_server_limit']) *
+ options['worker_threads_per_child'])
+ options['worker_max_spare_threads'] = max(
+ options['worker_threads_per_child'],
+ int(0.4 * options['worker_server_limit']) *
+ options['worker_threads_per_child'])
+
+ options['httpd_conf'] = os.path.join(options['server_root'], 'httpd.conf')
+
+ options['httpd_executable'] = os.environ.get('HTTPD',
+ options['httpd_executable'])
+
+ if not os.path.isabs(options['httpd_executable']):
+ options['httpd_executable'] = find_program(
+ [options['httpd_executable']], 'httpd', ['/usr/sbin'])
+
+ options['envvars_script'] = (os.path.abspath(
+ options['envvars_script']) if options['envvars_script'] is
+ not None else None)
+
+ options['httpd_arguments_list'] = []
+
+ if options['startup_log']:
+ options['startup_log_filename']= os.path.join(
+ options['log_directory'], 'startup.log')
+
+ options['httpd_arguments_list'].append('-E')
+ options['httpd_arguments_list'].append(
+ options['startup_log_filename'])
+
+ if options['port'] == 80:
+ options['url'] = 'http://%s/' % options['host']
+ else:
+ options['url'] = 'http://%s:%s/' % (options['host'],
+ options['port'])
+
+ if options['server_status']:
+ options['httpd_arguments_list'].append('-DWSGI_SERVER_STATUS')
+ if options['access_log']:
+ options['httpd_arguments_list'].append('-DWSGI_ACCESS_LOG')
+ if options['keep_alive'] != 0:
+ options['httpd_arguments_list'].append('-DWSGI_KEEP_ALIVE')
+ if options['multiprocess']:
+ options['httpd_arguments_list'].append('-DWSGI_MULTIPROCESS')
+
+ options['httpd_arguments'] = '-f %s %s' % (options['httpd_conf'],
+ ' '.join(options['httpd_arguments_list']))
+
+ options['python_executable'] = sys.executable
+
+ generate_apache_config(options)
+ generate_control_scripts(options)
+
+ print('Server URL :', options['url'])
+
+ if options['server_status']:
+ print('Server Status :', '%sserver-status' % options['url'])
+
+ print('Server Root :', options['server_root'])
+ print('Server Conf :', options['httpd_conf'])
+
+ print('Error Log :', options['error_log'])
+
+ if options['access_log']:
+ print('Access Log :', os.path.join(options['log_directory'],
+ 'access_log'))
+
+ return options
+
+def cmd_run_server(params):
+ usage = '%prog run-server script [options]'
+
+ options = cmd_setup_server(params, usage)
+
+ executable = os.path.join(options['server_root'], 'wsgi-server')
+ name = executable.ljust(len(options['process_name']))
+ os.execl(executable, name, 'start', '-DNO_DETACH')
+
+def cmd_install_so(params):
+ formatter = optparse.IndentedHelpFormatter()
+ formatter.set_long_opt_delimiter(' ')
+
+ usage = '%prog install-so [options]'
+ parser = optparse.OptionParser(usage=usage, formatter=formatter)
+
+ parser.add_option('--modules-directory', metavar='DIRECTORY',
+ default=apxs_config.LIBEXECDIR)
+
+ (options, args) = parser.parse_args(params)
+
+ if len(args) != 0:
+ parser.error('Incorrect number of arguments.')
+
+ target = os.path.abspath(os.path.join(options.modules_directory,
+ MOD_WSGI_SO))
+
+ shutil.copyfile(where(), target)
+
+ print('LoadModule wsgi_module %s' % target)
+
+def cmd_so_location(params):
+ formatter = optparse.IndentedHelpFormatter()
+ formatter.set_long_opt_delimiter(' ')
+
+    usage = '%prog so-location'
+ parser = optparse.OptionParser(usage=usage, formatter=formatter)
+
+ (options, args) = parser.parse_args(params)
+
+ if len(args) != 0:
+ parser.error('Incorrect number of arguments.')
+
+ print(where())
+
+main_usage="""
+%prog command [params]
+
+Commands:
+ install-so
+ so-location
+ run-server
+ setup-server
+"""
+
+def main():
+ parser = optparse.OptionParser(main_usage.strip())
+
+ args = sys.argv[1:]
+
+ if not args:
+ parser.error('No command was specified.')
+
+ command = args.pop(0)
+
+ args = [os.path.expandvars(arg) for arg in args]
+
+ if command == 'install-so':
+ cmd_install_so(args)
+ elif command in ('run-server', 'serve'):
+ cmd_run_server(args)
+ elif command == 'setup-server':
+ cmd_setup_server(args)
+ elif command == 'so-location':
+ cmd_so_location(args)
+ else:
+ parser.error('Invalid command was specified.')
+
+if __name__ == '__main__':
+ main()
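A minimal sketch of the worker MPM sizing arithmetic performed by the setup code above, using assumed example values (5 processes, 15 threads per process) rather than anything taken from the patch:

    import math

    processes = 5     # assumed example value (--processes)
    threads = 15      # assumed example value (--threads)

    max_clients = int(1.25 * processes * threads)          # 93
    threads_per_child = int(min(max_clients, 25))          # capped at 25
    count = max_clients / float(threads_per_child)         # 3.72
    server_limit = int(math.floor(count))
    if server_limit != count:                              # round up so all clients are covered
        server_limit += 1                                  # 4
    max_clients = server_limit * threads_per_child         # rounded back up to 100

These values presumably feed the worker MPM directives in the generated httpd.conf.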
diff --git a/src/server/management/__init__.py b/src/server/management/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/server/management/__init__.py
diff --git a/src/server/management/commands/__init__.py b/src/server/management/commands/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/server/management/commands/__init__.py
diff --git a/src/server/management/commands/runapache.py b/src/server/management/commands/runapache.py
new file mode 100644
index 0000000..c1b64b3
--- /dev/null
+++ b/src/server/management/commands/runapache.py
@@ -0,0 +1,51 @@
+import os
+import sys
+import inspect
+
+from django.core.management.base import BaseCommand
+
+import mod_wsgi.server
+
+class Command(BaseCommand):
+ option_list = BaseCommand.option_list + mod_wsgi.server.option_list
+ args = '[options]'
+ help = 'Starts Apache/mod_wsgi web server.'
+
+ def handle(self, *args, **options):
+ self.stdout.write('Successfully ran command.')
+
+ from django.conf import settings
+ wsgi_application = settings.WSGI_APPLICATION
+
+ fields = wsgi_application.split('.')
+
+ module_name = '.'.join(fields[:-1])
+ callable_object = fields[-1]
+
+ __import__(module_name)
+
+ script_file = inspect.getsourcefile(sys.modules[module_name])
+
+ args = [script_file]
+ options['callable_object'] = callable_object
+
+ options['working_directory'] = settings.BASE_DIR
+
+ url_aliases = options.setdefault('url_aliases') or []
+
+ try:
+ if settings.STATIC_URL and settings.STATIC_URL.startswith('/'):
+ if settings.STATIC_ROOT:
+ url_aliases.insert(0,
+ (settings.STATIC_URL.rstrip('/') or '/',
+ settings.STATIC_ROOT))
+ except AttributeError:
+ pass
+
+ options['url_aliases'] = url_aliases
+
+ options = mod_wsgi.server._cmd_setup_server(args, options)
+
+ executable = os.path.join(options['server_root'], 'wsgi-server')
+ name = executable.ljust(len(options['process_name']))
+ os.execl(executable, name, 'start', '-DNO_DETACH')
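A minimal sketch of how the runapache command above turns settings.WSGI_APPLICATION into the script path handed to the server setup code, assuming a hypothetical project whose WSGI_APPLICATION is 'mysite.wsgi.application':

    import inspect
    import sys

    wsgi_application = 'mysite.wsgi.application'   # hypothetical settings.WSGI_APPLICATION

    fields = wsgi_application.split('.')
    module_name = '.'.join(fields[:-1])             # 'mysite.wsgi'
    callable_object = fields[-1]                    # 'application'

    __import__(module_name)
    script_file = inspect.getsourcefile(sys.modules[module_name])

    # script_file now points at mysite/wsgi.py; the command passes it, together
    # with callable_object, to mod_wsgi.server._cmd_setup_server().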
diff --git a/mod_wsgi.c b/src/server/mod_wsgi.c
index 19bc6a3..b10fab2 100644
--- a/mod_wsgi.c
+++ b/src/server/mod_wsgi.c
@@ -1,7 +1,7 @@
-/* vim: set sw=4 expandtab : */
+/* ------------------------------------------------------------------------- */
/*
- * Copyright 2007-2012 GRAHAM DUMPLETON
+ * Copyright 2007-2013 GRAHAM DUMPLETON
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,195 +16,29 @@
* limitations under the License.
*/
-/*
- * Enabled access to Apache private API and data structures. Need to do
- * this to access the following:
- *
- * In Apache 1.3 it is not possible to access ap_check_cmd_context()
- *   whereas this was made public in Apache 2.0.
- *
- * In Apache 2.X need access to ap_create_request_config().
- *
- * In Apache 2.X need access to core_module and core_request_config.
- *
- */
-
-#define CORE_PRIVATE 1
-
-#include "httpd.h"
-
-#if !defined(HTTPD_ROOT)
-#error Sorry, Apache developer package does not appear to be installed.
-#endif
-
-#if !defined(AP_SERVER_MAJORVERSION_NUMBER)
-#if AP_MODULE_MAGIC_AT_LEAST(20010224,0)
-#define AP_SERVER_MAJORVERSION_NUMBER 2
-#define AP_SERVER_MINORVERSION_NUMBER 0
-#define AP_SERVER_PATCHLEVEL_NUMBER 0
-#else
-#define AP_SERVER_MAJORVERSION_NUMBER 1
-#define AP_SERVER_MINORVERSION_NUMBER 3
-#define AP_SERVER_PATCHLEVEL_NUMBER 0
-#endif
-#endif
-
-#if !defined(AP_SERVER_BASEVERSION)
-#define AP_SERVER_BASEVERSION SERVER_BASEVERSION
-#endif
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
-typedef int apr_status_t;
-#define APR_SUCCESS 0
-typedef pool apr_pool_t;
-typedef unsigned int apr_port_t;
-#include "ap_ctype.h"
-#include "ap_alloc.h"
-#define apr_isspace ap_isspace
-#define apr_table_make ap_make_table
-#define apr_table_get ap_table_get
-#define apr_table_set ap_table_set
-#define apr_table_setn ap_table_setn
-#define apr_table_add ap_table_add
-#define apr_table_elts ap_table_elts
-#define apr_array_make ap_make_array
-#define apr_array_push ap_push_array
-#define apr_array_cat ap_array_cat
-#define apr_array_append ap_append_arrays
-typedef array_header apr_array_header_t;
-typedef table apr_table_t;
-typedef table_entry apr_table_entry_t;
-typedef int apr_size_t;
-typedef unsigned long apr_off_t;
-#define apr_psprintf ap_psprintf
-#define apr_pstrndup ap_pstrndup
-#define apr_pstrdup ap_pstrdup
-#define apr_pstrcat ap_pstrcat
-#define apr_pcalloc ap_pcalloc
-#define apr_palloc ap_palloc
-#define apr_isalnum isalnum
-#define apr_toupper toupper
-typedef time_t apr_time_t;
-#include "http_config.h"
-typedef int apr_lockmech_e;
-#else
-#include "apr_lib.h"
-#include "ap_mpm.h"
-#include "ap_compat.h"
-#include "apr_tables.h"
-#include "apr_strings.h"
-#include "http_config.h"
-#include "ap_listen.h"
-#include "apr_version.h"
-
-#include "apr_optional.h"
-
-APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *));
-APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, (apr_pool_t *,
- server_rec *, conn_rec *, request_rec *, char *));
-
-#endif
-
-#include "ap_config.h"
-#include "http_core.h"
-#include "http_log.h"
-#include "http_main.h"
-#include "http_protocol.h"
-#include "http_request.h"
-#include "util_script.h"
-#include "util_md5.h"
-
-#ifndef APR_FPROT_GWRITE
-#define APR_FPROT_GWRITE APR_GWRITE
-#endif
-#ifndef APR_FPROT_WWRITE
-#define APR_FPROT_WWRITE APR_WWRITE
-#endif
+/* ------------------------------------------------------------------------- */
-#if !AP_MODULE_MAGIC_AT_LEAST(20050127,0)
-/* Debian backported ap_regex_t to Apache 2.0 and
- * thus made official version checking break. */
-#ifndef AP_REG_EXTENDED
-typedef regex_t ap_regex_t;
-typedef regmatch_t ap_regmatch_t;
-#define AP_REG_EXTENDED REG_EXTENDED
-#endif
-#endif
+#include "wsgi_apache.h"
+#include "wsgi_python.h"
-#if !AP_MODULE_MAGIC_AT_LEAST(20081201,0)
-#define ap_unixd_config unixd_config
+#ifdef HAVE_SYS_PRCTL_H
+#include <sys/prctl.h>
#endif
#ifndef WIN32
#include <pwd.h>
#endif
-#include "Python.h"
-
-#if !defined(PY_VERSION_HEX)
-#error Sorry, Python developer package does not appear to be installed.
-#endif
-
-#if PY_VERSION_HEX <= 0x02030000
-#error Sorry, mod_wsgi requires at least Python 2.3.0 for Python 2.X.
-#endif
-
-#if PY_VERSION_HEX >= 0x03000000 && PY_VERSION_HEX < 0x03010000
-#error Sorry, mod_wsgi requires at least Python 3.1.0 for Python 3.X.
-#endif
-
-#if !defined(WITH_THREAD)
-#error Sorry, mod_wsgi requires that Python supports threads.
-#endif
-
-#include "compile.h"
-#include "node.h"
-#include "osdefs.h"
-
-#ifndef PyVarObject_HEAD_INIT
-#define PyVarObject_HEAD_INIT(type, size) \
- PyObject_HEAD_INIT(type) size,
-#endif
-
-#if PY_MAJOR_VERSION >= 3
-#define PyStringObject PyBytesObject
-#define PyString_Check PyBytes_Check
-#define PyString_Size PyBytes_Size
-#define PyString_AsString PyBytes_AsString
-#define PyString_FromString PyBytes_FromString
-#define PyString_FromStringAndSize PyBytes_FromStringAndSize
-#define PyString_AS_STRING PyBytes_AS_STRING
-#define PyString_GET_SIZE PyBytes_GET_SIZE
-#define _PyString_Resize _PyBytes_Resize
-#endif
-
-#ifndef WIN32
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
-#if APR_HAS_OTHER_CHILD && APR_HAS_THREADS && APR_HAS_FORK
-#define MOD_WSGI_WITH_DAEMONS 1
-#endif
-#endif
-#endif
-
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
-#define MOD_WSGI_WITH_BUCKETS 1
-#define MOD_WSGI_WITH_AAA_HANDLERS 1
-#endif
-
-#if defined(MOD_WSGI_WITH_AAA_HANDLERS)
static PyTypeObject Auth_Type;
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
#if AP_SERVER_MINORVERSION_NUMBER >= 2
#define MOD_WSGI_WITH_AUTHN_PROVIDER 1
#endif
-#endif
#if AP_MODULE_MAGIC_AT_LEAST(20060110,0)
#define MOD_WSGI_WITH_AUTHZ_PROVIDER 1
#if AP_MODULE_MAGIC_AT_LEAST(20100919,0)
#define MOD_WSGI_WITH_AUTHZ_PROVIDER_PARSED 1
#endif
#endif
-#endif
#if defined(MOD_WSGI_WITH_AUTHN_PROVIDER)
#include "mod_auth.h"
@@ -214,373 +48,53 @@ static PyTypeObject Auth_Type;
#endif
#endif
-#if defined(MOD_WSGI_WITH_DAEMONS)
+/* Local project header files. */
-#if !AP_MODULE_MAGIC_AT_LEAST(20051115,0)
-static void ap_close_listeners(void)
-{
- ap_listen_rec *lr;
+#include "wsgi_version.h"
+#include "wsgi_convert.h"
+#include "wsgi_validate.h"
+#include "wsgi_interp.h"
+#include "wsgi_server.h"
+#include "wsgi_logger.h"
+#include "wsgi_restrict.h"
+#include "wsgi_stream.h"
+#include "wsgi_metrics.h"
+#include "wsgi_daemon.h"
+#include "wsgi_buckets.h"
- for (lr = ap_listeners; lr; lr = lr->next) {
- apr_socket_close(lr->sd);
- lr->active = 0;
- }
-}
-#endif
-
-#if (APR_MAJOR_VERSION == 0) && \
- (APR_MINOR_VERSION == 9) && \
- (APR_PATCH_VERSION < 5)
-static apr_status_t apr_unix_file_cleanup(void *thefile)
-{
- apr_file_t *file = thefile;
-
- return apr_file_close(file);
-}
+/* Module information. */
-static apr_status_t apr_os_pipe_put_ex(apr_file_t **file,
- apr_os_file_t *thefile,
- int register_cleanup,
- apr_pool_t *pool)
-{
- apr_status_t rv;
-
- rv = apr_os_pipe_put(file, thefile, pool);
-
- if (register_cleanup) {
- apr_pool_cleanup_register(pool, (void *)(*file),
- apr_unix_file_cleanup,
- apr_pool_cleanup_null);
- }
-
- return rv;
-}
-#endif
-
-#endif
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
-
-static char *apr_off_t_toa(apr_pool_t *p, apr_off_t n)
-{
- const int BUFFER_SIZE = sizeof(apr_off_t) * 3 + 2;
- char *buf = apr_palloc(p, BUFFER_SIZE);
- char *start = buf + BUFFER_SIZE - 1;
- int negative;
- if (n < 0) {
- negative = 1;
- n = -n;
- }
- else {
- negative = 0;
- }
- *start = 0;
- do {
- *--start = '0' + (char)(n % 10);
- n /= 10;
- } while (n);
- if (negative) {
- *--start = '-';
- }
- return start;
-}
-
-#endif
-
-#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
-typedef apr_uint16_t apr_wchar_t;
-
-APR_DECLARE(apr_status_t) apr_conv_utf8_to_ucs2(const char *in,
- apr_size_t *inbytes,
- apr_wchar_t *out,
- apr_size_t *outwords);
-
-static apr_status_t wsgi_utf8_to_unicode_path(apr_wchar_t* retstr,
- apr_size_t retlen,
- const char* srcstr)
-{
- /* TODO: The computations could preconvert the string to determine
- * the true size of the retstr, but that's a memory over speed
- * tradeoff that isn't appropriate this early in development.
- *
- * Allocate the maximum string length based on leading 4
- * characters of \\?\ (allowing nearly unlimited path lengths)
- * plus the trailing null, then transform /'s into \\'s since
-     * the \\?\ form doesn't allow '/' path separators.
- *
- * Note that the \\?\ form only works for local drive paths, and
-     * \\?\UNC\ is needed for UNC paths.
- */
- apr_size_t srcremains = strlen(srcstr) + 1;
- apr_wchar_t *t = retstr;
- apr_status_t rv;
-
-    /* This is correct, we don't twist the filename if it will
-     * definitely be shorter than 248 characters. It merits some
- * performance testing to see if this has any effect, but there
- * seem to be applications that get confused by the resulting
- * Unicode \\?\ style file names, especially if they use argv[0]
- * or call the Win32 API functions such as GetModuleName, etc.
- * Not every application is prepared to handle such names.
- *
- * Note also this is shorter than MAX_PATH, as directory paths
- * are actually limited to 248 characters.
- *
- * Note that a utf-8 name can never result in more wide chars
- * than the original number of utf-8 narrow chars.
- */
- if (srcremains > 248) {
- if (srcstr[1] == ':' && (srcstr[2] == '/' || srcstr[2] == '\\')) {
- wcscpy (retstr, L"\\\\?\\");
- retlen -= 4;
- t += 4;
- }
- else if ((srcstr[0] == '/' || srcstr[0] == '\\')
- && (srcstr[1] == '/' || srcstr[1] == '\\')
- && (srcstr[2] != '?')) {
- /* Skip the slashes */
- srcstr += 2;
- srcremains -= 2;
- wcscpy (retstr, L"\\\\?\\UNC\\");
- retlen -= 8;
- t += 8;
- }
- }
-
- if (rv = apr_conv_utf8_to_ucs2(srcstr, &srcremains, t, &retlen)) {
- return (rv == APR_INCOMPLETE) ? APR_EINVAL : rv;
- }
- if (srcremains) {
- return APR_ENAMETOOLONG;
- }
- for (; *t; ++t)
- if (*t == L'/')
- *t = L'\\';
- return APR_SUCCESS;
-}
-#endif
-
-/* Compatibility macros for log level and status. */
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
-#define WSGI_LOG_LEVEL(l) l
-#define WSGI_LOG_LEVEL_AND_STATUS(l, e) l | (!e ? APLOG_NOERRNO : 0)
-#else
-#define WSGI_LOG_LEVEL(l) l, 0
-#define WSGI_LOG_LEVEL_AND_STATUS(l, e) l, e
-#endif
-
-#define WSGI_LOG_EMERG(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_EMERG, e)
-#define WSGI_LOG_ALERT(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_ALERT, e)
-#define WSGI_LOG_CRIT(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_CRIT, e)
-#define WSGI_LOG_ERR(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_ERR, e)
-#define WSGI_LOG_WARNING(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_WARNING, e)
-#define WSGI_LOG_NOTICE(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_NOTICE, e)
-#define WSGI_LOG_INFO(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_INFO, e)
-#define WSGI_LOG_DEBUG(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_DEBUG, e)
-
-/* Version and module information. */
-
-#define MOD_WSGI_MAJORVERSION_NUMBER 3
-#define MOD_WSGI_MINORVERSION_NUMBER 4
-#define MOD_WSGI_VERSION_STRING "3.4"
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
-module MODULE_VAR_EXPORT wsgi_module;
-#else
module AP_MODULE_DECLARE_DATA wsgi_module;
-#endif
-
-/* Constants. */
-
-#define WSGI_RELOAD_MODULE 0
-#define WSGI_RELOAD_PROCESS 1
-
-/* Python interpreter state. */
-
-static PyThreadState *wsgi_main_tstate = NULL;
-
-/* Base server object. */
-
-static server_rec *wsgi_server = NULL;
/* Process information. */
-static pid_t wsgi_parent_pid = 0;
static int wsgi_multiprocess = 1;
static int wsgi_multithread = 1;
/* Daemon information. */
-static const char *wsgi_daemon_group = "";
-
static apr_array_header_t *wsgi_daemon_list = NULL;
static apr_pool_t *wsgi_parent_pool = NULL;
-static apr_pool_t *wsgi_daemon_pool = NULL;
static int volatile wsgi_daemon_shutdown = 0;
+static int volatile wsgi_daemon_graceful = 0;
#if defined(MOD_WSGI_WITH_DAEMONS)
static apr_interval_time_t wsgi_deadlock_timeout = 0;
-static apr_interval_time_t wsgi_inactivity_timeout = 0;
+static apr_interval_time_t wsgi_idle_timeout = 0;
+static apr_interval_time_t wsgi_busy_timeout = 0;
+static apr_interval_time_t wsgi_graceful_timeout = 0;
static apr_time_t volatile wsgi_deadlock_shutdown_time = 0;
-static apr_time_t volatile wsgi_inactivity_shutdown_time = 0;
-static apr_thread_mutex_t* wsgi_shutdown_lock = NULL;
+static apr_time_t volatile wsgi_idle_shutdown_time = 0;
+static apr_time_t volatile wsgi_busy_shutdown_time = 0;
+static apr_time_t volatile wsgi_graceful_shutdown_time = 0;
#endif
/* Script information. */
static apr_array_header_t *wsgi_import_list = NULL;
-/* Configuration objects. */
-
-typedef struct {
- const char *location;
- const char *application;
- ap_regex_t *regexp;
- const char *process_group;
- const char *application_group;
- const char *callable_object;
- int pass_authorization;
-} WSGIAliasEntry;
-
-typedef struct {
- const char *handler_script;
- const char *process_group;
- const char *application_group;
- const char *callable_object;
- const char *pass_authorization;
-} WSGIScriptFile;
-
-typedef struct {
- apr_pool_t *pool;
-
- apr_array_header_t *alias_list;
-
- const char *socket_prefix;
- apr_lockmech_e lock_mechanism;
-
- int verbose_debugging;
-
- apr_array_header_t *python_warnings;
-
- int python_optimize;
- int py3k_warning_flag;
- int dont_write_bytecode;
-
- const char *lang;
- const char *locale;
-
- const char *python_home;
- const char *python_path;
- const char *python_eggs;
-
- int restrict_embedded;
- int restrict_stdin;
- int restrict_stdout;
- int restrict_signal;
-
- int case_sensitivity;
-
- apr_table_t *restrict_process;
-
- const char *process_group;
- const char *application_group;
- const char *callable_object;
-
- WSGIScriptFile *dispatch_script;
-
- int pass_apache_request;
- int pass_authorization;
- int script_reloading;
- int error_override;
- int chunked_request;
-
- int enable_sendfile;
-
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
- apr_hash_t *handler_scripts;
-#endif
-} WSGIServerConfig;
-
-static WSGIServerConfig *wsgi_server_config = NULL;
-
-static WSGIScriptFile *newWSGIScriptFile(apr_pool_t *p)
-{
- WSGIScriptFile *object = NULL;
-
- object = (WSGIScriptFile *)apr_pcalloc(p, sizeof(WSGIScriptFile));
-
- object->handler_script = NULL;
- object->application_group = NULL;
- object->process_group = NULL;
-
- return object;
-}
-
-static WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p)
-{
- WSGIServerConfig *object = NULL;
-
- object = (WSGIServerConfig *)apr_pcalloc(p, sizeof(WSGIServerConfig));
-
- object->pool = p;
-
- object->alias_list = NULL;
-
- object->socket_prefix = NULL;
-
-#if defined(MOD_WSGI_WITH_DAEMONS)
- object->socket_prefix = DEFAULT_REL_RUNTIMEDIR "/wsgi";
- object->socket_prefix = ap_server_root_relative(p, object->socket_prefix);
-#endif
-
- object->verbose_debugging = 0;
-
- object->python_warnings = NULL;
-
- object->py3k_warning_flag = -1;
- object->python_optimize = -1;
- object->dont_write_bytecode = -1;
-
- object->lang = NULL;
- object->locale = NULL;
-
- object->python_home = NULL;
- object->python_path = NULL;
- object->python_eggs = NULL;
-
- object->restrict_embedded = -1;
- object->restrict_stdin = -1;
- object->restrict_stdout = -1;
- object->restrict_signal = -1;
-
-#if defined(WIN32) || defined(DARWIN)
- object->case_sensitivity = 0;
-#else
- object->case_sensitivity = 1;
-#endif
-
- object->restrict_process = NULL;
-
- object->process_group = NULL;
- object->application_group = NULL;
- object->callable_object = NULL;
-
- object->dispatch_script = NULL;
-
- object->pass_apache_request = -1;
- object->pass_authorization = -1;
- object->script_reloading = -1;
- object->error_override = -1;
- object->chunked_request = -1;
-
- object->enable_sendfile = -1;
-
- return object;
-}
-
static void *wsgi_create_server_config(apr_pool_t *p, server_rec *s)
{
WSGIServerConfig *config = NULL;
@@ -670,7 +184,6 @@ static void *wsgi_merge_server_config(apr_pool_t *p, void *base_conf,
else
config->enable_sendfile = parent->enable_sendfile;
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
if (!child->handler_scripts)
config->handler_scripts = parent->handler_scripts;
else if (!parent->handler_scripts)
@@ -679,7 +192,6 @@ static void *wsgi_merge_server_config(apr_pool_t *p, void *base_conf,
config->handler_scripts = apr_hash_overlay(p, child->handler_scripts,
parent->handler_scripts);
}
-#endif
return config;
}
@@ -709,9 +221,7 @@ typedef struct {
int user_authoritative;
int group_authoritative;
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
apr_hash_t *handler_scripts;
-#endif
} WSGIDirectoryConfig;
static WSGIDirectoryConfig *newWSGIDirectoryConfig(apr_pool_t *p)
@@ -846,7 +356,6 @@ static void *wsgi_merge_dir_config(apr_pool_t *p, void *base_conf,
else
config->group_authoritative = parent->group_authoritative;
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
if (!child->handler_scripts)
config->handler_scripts = parent->handler_scripts;
else if (!parent->handler_scripts)
@@ -855,7 +364,6 @@ static void *wsgi_merge_dir_config(apr_pool_t *p, void *base_conf,
config->handler_scripts = apr_hash_overlay(p, child->handler_scripts,
parent->handler_scripts);
}
-#endif
return config;
}
@@ -885,9 +393,7 @@ typedef struct {
int user_authoritative;
int group_authoritative;
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
apr_hash_t *handler_scripts;
-#endif
const char *handler_script;
} WSGIRequestConfig;
@@ -988,7 +494,6 @@ static const char *wsgi_process_group(request_rec *r, const char *s)
static const char *wsgi_server_group(request_rec *r, const char *s)
{
const char *name = NULL;
- const char *value = NULL;
const char *h = NULL;
apr_port_t p = 0;
@@ -1249,7 +754,6 @@ static WSGIRequestConfig *wsgi_create_req_config(apr_pool_t *p, request_rec *r)
if (config->group_authoritative == -1)
config->group_authoritative = 1;
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
if (!dconfig->handler_scripts)
config->handler_scripts = sconfig->handler_scripts;
else if (!sconfig->handler_scripts)
@@ -1258,858 +762,18 @@ static WSGIRequestConfig *wsgi_create_req_config(apr_pool_t *p, request_rec *r)
config->handler_scripts = apr_hash_overlay(p, dconfig->handler_scripts,
sconfig->handler_scripts);
}
-#endif
config->handler_script = "";
return config;
}
-/*
- * Apache 2.X and UNIX specific definitions related to
- * distinct daemon processes.
- */
-
-#if defined(MOD_WSGI_WITH_DAEMONS)
-
-#include "unixd.h"
-#include "scoreboard.h"
-#include "mpm_common.h"
-#include "apr_proc_mutex.h"
-#include "apr_thread_cond.h"
-#include "apr_atomic.h"
-#include "http_connection.h"
-#include "apr_buckets.h"
-#include "apr_poll.h"
-#include "apr_signal.h"
-#include "http_vhost.h"
-
-#if APR_MAJOR_VERSION < 1
-#define apr_atomic_cas32 apr_atomic_cas
-#endif
-
-#if APR_HAVE_SYS_SOCKET_H
-#include <sys/socket.h>
-#endif
-#if APR_HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#if APR_HAVE_SYS_TYPES_H
-#include <sys/types.h>
-#endif
-#ifdef HAVE_SYS_SEM_H
-#include <sys/sem.h>
-#endif
-
-#include <locale.h>
-#include <sys/un.h>
-
-#ifndef WSGI_LISTEN_BACKLOG
-#define WSGI_LISTEN_BACKLOG 100
-#endif
-
-#ifndef WSGI_CONNECT_ATTEMPTS
-#define WSGI_CONNECT_ATTEMPTS 15
-#endif
-
-#define WSGI_STACK_HEAD 0xffff
-#define WSGI_STACK_LAST 0xffff
-#define WSGI_STACK_TERMINATED 0x10000
-#define WSGI_STACK_NO_LISTENER 0x20000
-
-typedef struct {
- server_rec *server;
- long random;
- int id;
- const char *name;
- const char *user;
- uid_t uid;
- const char *group;
- gid_t gid;
- const char *groups_list;
- int groups_count;
- gid_t *groups;
- int processes;
- int multiprocess;
- int threads;
- int umask;
- const char *root;
- const char *home;
- const char *lang;
- const char *locale;
- const char *python_home;
- const char *python_path;
- const char *python_eggs;
- int stack_size;
- int maximum_requests;
- int shutdown_timeout;
- apr_time_t deadlock_timeout;
- apr_time_t inactivity_timeout;
- const char *display_name;
- int send_buffer_size;
- int recv_buffer_size;
- const char *script_user;
- const char *script_group;
- int cpu_time_limit;
- int cpu_priority;
- rlim_t memory_limit;
- rlim_t virtual_memory_limit;
- const char *socket;
- int listener_fd;
- const char* mutex_path;
- apr_proc_mutex_t* mutex;
-} WSGIProcessGroup;
-
-typedef struct {
- WSGIProcessGroup *group;
- int instance;
- apr_proc_t process;
- apr_socket_t *listener;
-} WSGIDaemonProcess;
-
-typedef struct {
- int id;
- WSGIDaemonProcess *process;
- apr_thread_t *thread;
- int running;
- int next;
- int wakeup;
- apr_thread_cond_t *condition;
- apr_thread_mutex_t *mutex;
-} WSGIDaemonThread;
-
-typedef struct {
- apr_uint32_t state;
-} WSGIThreadStack;
-
-typedef struct {
- const char *name;
- const char *socket;
- int fd;
-} WSGIDaemonSocket;
-
-static int wsgi_daemon_count = 0;
-static apr_hash_t *wsgi_daemon_index = NULL;
-static apr_hash_t *wsgi_daemon_listeners = NULL;
-
-static WSGIDaemonProcess *wsgi_daemon_process = NULL;
-
-static int volatile wsgi_request_count = 0;
-
-static WSGIDaemonThread *wsgi_worker_threads = NULL;
-
-static WSGIThreadStack *wsgi_worker_stack = NULL;
-
-#endif
-
/* Class objects used by response handler. */
static PyTypeObject Dispatch_Type;
typedef struct {
PyObject_HEAD
- const char *target;
- request_rec *r;
- int level;
- char *s;
- int l;
- int expired;
-#if PY_MAJOR_VERSION < 3
- int softspace;
-#endif
-} LogObject;
-
-static PyTypeObject Log_Type;
-
-static PyObject *newLogObject(request_rec *r, int level, const char *target)
-{
- LogObject *self;
-
-#if PY_MAJOR_VERSION >= 3
- PyObject *module = NULL;
- PyObject *dict = NULL;
- PyObject *object = NULL;
- PyObject *args = NULL;
- PyObject *result = NULL;
-
- module = PyImport_ImportModule("io");
-
- if (!module)
- return NULL;
-
- dict = PyModule_GetDict(module);
- object = PyDict_GetItemString(dict, "TextIOWrapper");
-
- if (!object) {
- PyErr_SetString(PyExc_NameError,
- "name 'TextIOWrapper' is not defined");
- return NULL;
- }
-#endif
-
- self = PyObject_New(LogObject, &Log_Type);
- if (self == NULL)
- return NULL;
-
- self->target = target;
- self->r = r;
- self->level = APLOG_NOERRNO|level;
- self->s = NULL;
- self->l = 0;
- self->expired = 0;
-#if PY_MAJOR_VERSION < 3
- self->softspace = 0;
-#endif
-
-#if PY_MAJOR_VERSION >= 3
- Py_INCREF(object);
- args = Py_BuildValue("(OssOO)", self, "utf-8", "replace",
- Py_None, Py_True);
- Py_DECREF(self);
- result = PyEval_CallObject(object, args);
- Py_DECREF(args);
- Py_DECREF(object);
-
- return result;
-#else
- return (PyObject *)self;
-#endif
-}
-
-#if 0
-static void Log_file(LogObject *self, const char *s, int l)
-{
- /*
- * XXX This function is not currently being used.
- * The intention was that it be called instead of
- * Log_call() when 'target' is non zero. This would
- * be the case for 'stdout' and 'stderr'. Doing
-     * this bypasses the normal Apache logging mechanisms
- * though. May reawaken this code in mod_wsgi 4.0
- * by way of a mechanism to divert logging from a
-     * daemon process to a specific log file or pipe using
- * an option to WSGIDaemonProcess.
- */
-
- char errstr[MAX_STRING_LEN];
-
- int plen = 0;
- int slen = 0;
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- FILE *logf;
-#else
- apr_file_t *logf = NULL;
-#endif
-
- if (self->r)
- logf = self->r->server->error_log;
- else
- logf = wsgi_server->error_log;
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- plen = ap_snprintf(errstr, sizeof(errstr), "[%s] ", ap_get_time());
-#else
- errstr[0] = '[';
- ap_recent_ctime(errstr + 1, apr_time_now());
- errstr[1 + APR_CTIME_LEN - 1] = ']';
- errstr[1 + APR_CTIME_LEN ] = ' ';
- plen = 1 + APR_CTIME_LEN + 1;
-#endif
-
- if (self->target) {
- int len;
-
- errstr[plen++] = '[';
-
- len = strlen(self->target);
- memcpy(errstr+plen, self->target, len);
-
- plen += len;
-
- errstr[plen++] = ']';
- errstr[plen++] = ' ';
- }
-
- slen = MAX_STRING_LEN - plen - 1;
-
- Py_BEGIN_ALLOW_THREADS
-
- /*
- * We actually break long lines up into segments
- * of around 8192 characters, with the date/time
- * and target information prefixing each line.
- * This is just to avoid having to allocate more
- * memory just to format the line with prefix.
- * We want to avoid writing the prefix separately
-     * so we at least try to write the line in one atomic
- * operation.
- */
-
- while (1) {
- if (l > slen) {
- memcpy(errstr+plen, s, slen);
- errstr[plen+slen] = '\n';
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- fwrite(errstr, plen+slen+1, 1, logf);
- fflush(logf);
-#else
- apr_file_write_full(logf, errstr, plen+slen+1, NULL);
- apr_file_flush(logf);
-#endif
- s += slen;
- l -= slen;
- }
- else {
- memcpy(errstr+plen, s, l);
- errstr[plen+l] = '\n';
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- fwrite(errstr, plen+l+1, 1, logf);
- fflush(logf);
-#else
- apr_file_write_full(logf, errstr, plen+l+1, NULL);
- apr_file_flush(logf);
-#endif
- break;
- }
- }
-
- Py_END_ALLOW_THREADS
-}
-#endif
-
-static void Log_call(LogObject *self, const char *s, int l)
-{
- /*
- * The length of the string to be logged is ignored
- * for now. We just pass the whole string to the
- * Apache error log functions. It will actually
- * truncate it at some value less than 8192
- * characters depending on the length of the prefix
- * to go at the front. If there are embedded NULLs
-     * then truncation will occur at that point. Truncation
-     * occurring like this is also what happens when using
-     * FASTCGI solutions for Apache, so we are not doing
-     * anything different here.
- */
-
- if (self->r) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
- self->r, "%s", s);
- Py_END_ALLOW_THREADS
- }
- else {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_LEVEL(self->level),
- wsgi_server, "%s", s);
- Py_END_ALLOW_THREADS
- }
-}
-
-static void Log_dealloc(LogObject *self)
-{
- if (self->s) {
- if (!self->expired)
- Log_call(self, self->s, self->l);
-
- free(self->s);
- }
-
- PyObject_Del(self);
-}
-
-static PyObject *Log_flush(LogObject *self, PyObject *args)
-{
- if (self->expired) {
- PyErr_SetString(PyExc_RuntimeError, "log object has expired");
- return NULL;
- }
-
- if (!PyArg_ParseTuple(args, ":flush"))
- return NULL;
-
- if (self->s) {
- Log_call(self, self->s, self->l);
-
- free(self->s);
- self->s = NULL;
- self->l = 0;
- }
-
- Py_INCREF(Py_None);
- return Py_None;
-}
-
-static PyObject *Log_close(LogObject *self, PyObject *args)
-{
- PyObject *result = NULL;
-
- if (!PyArg_ParseTuple(args, ":close"))
- return NULL;
-
- if (!self->expired)
- result = Log_flush(self, args);
-
- Py_XDECREF(result);
-
- self->r = NULL;
- self->expired = 1;
-
- Py_INCREF(Py_None);
- return Py_None;
-}
-
-static PyObject *Log_isatty(LogObject *self, PyObject *args)
-{
- PyObject *result = NULL;
-
- if (!PyArg_ParseTuple(args, ":isatty"))
- return NULL;
-
- Py_INCREF(Py_False);
- return Py_False;
-}
-
-static void Log_queue(LogObject *self, const char *msg, int len)
-{
- const char *p = NULL;
- const char *q = NULL;
- const char *e = NULL;
-
- p = msg;
- e = p + len;
-
- /*
-     * Break string on newline. This is on the assumption
-     * that primarily textual information is being logged.
- */
-
- q = p;
- while (q != e) {
- if (*q == '\n')
- break;
- q++;
- }
-
- while (q != e) {
- /* Output each complete line. */
-
- if (self->s) {
- /* Need to join with buffered value. */
-
- int m = 0;
- int n = 0;
- char *s = NULL;
-
- m = self->l;
- n = m+q-p+1;
-
- s = (char *)malloc(n);
- memcpy(s, self->s, m);
- memcpy(s+m, p, q-p);
- s[n-1] = '\0';
-
- free(self->s);
- self->s = NULL;
- self->l = 0;
-
- Log_call(self, s, n-1);
-
- free(s);
- }
- else {
- int n = 0;
- char *s = NULL;
-
- n = q-p+1;
-
- s = (char *)malloc(n);
- memcpy(s, p, q-p);
- s[n-1] = '\0';
-
- Log_call(self, s, n-1);
-
- free(s);
- }
-
- p = q+1;
-
- /* Break string on newline. */
-
- q = p;
- while (q != e) {
- if (*q == '\n')
- break;
- q++;
- }
- }
-
- if (p != e) {
- /* Save away incomplete line. */
-
- if (self->s) {
- /* Need to join with buffered value. */
-
- int m = 0;
- int n = 0;
-
- m = self->l;
- n = m+e-p+1;
-
- self->s = (char *)realloc(self->s, n);
- memcpy(self->s+m, p, e-p);
- self->s[n-1] = '\0';
- self->l = n-1;
- }
- else {
- int n = 0;
-
- n = e-p+1;
-
- self->s = (char *)malloc(n);
- memcpy(self->s, p, n-1);
- self->s[n-1] = '\0';
- self->l = n-1;
- }
- }
-}
-
-static PyObject *Log_write(LogObject *self, PyObject *args)
-{
- const char *msg = NULL;
- int len = -1;
-
- if (self->expired) {
- PyErr_SetString(PyExc_RuntimeError, "log object has expired");
- return NULL;
- }
-
- if (!PyArg_ParseTuple(args, "s#:write", &msg, &len))
- return NULL;
-
- Log_queue(self, msg, len);
-
- Py_INCREF(Py_None);
- return Py_None;
-}
-
-static PyObject *Log_writelines(LogObject *self, PyObject *args)
-{
- PyObject *sequence = NULL;
- PyObject *iterator = NULL;
- PyObject *item = NULL;
- const char *msg = NULL;
-
- if (self->expired) {
- PyErr_SetString(PyExc_RuntimeError, "log object has expired");
- return NULL;
- }
-
- if (!PyArg_ParseTuple(args, "O:writelines", &sequence))
- return NULL;
-
- iterator = PyObject_GetIter(sequence);
-
- if (iterator == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "argument must be sequence of strings");
-
- return NULL;
- }
-
- while ((item = PyIter_Next(iterator))) {
- PyObject *result = NULL;
- PyObject *args = NULL;
-
- args = PyTuple_Pack(1, item);
-
- result = Log_write(self, args);
-
- Py_DECREF(args);
- Py_DECREF(item);
-
- if (!result) {
- Py_DECREF(iterator);
-
- PyErr_SetString(PyExc_TypeError,
- "argument must be sequence of strings");
-
- return NULL;
- }
- }
-
- Py_DECREF(iterator);
-
- Py_INCREF(Py_None);
- return Py_None;
-}
-
-#if PY_MAJOR_VERSION >= 3
-static PyObject *Log_readable(LogObject *self, PyObject *args)
-{
- if (!PyArg_ParseTuple(args, ":readable"))
- return NULL;
-
- Py_INCREF(Py_False);
- return Py_False;
-}
-
-static PyObject *Log_seekable(LogObject *self, PyObject *args)
-{
- if (!PyArg_ParseTuple(args, ":seekable"))
- return NULL;
-
- Py_INCREF(Py_False);
- return Py_False;
-}
-
-static PyObject *Log_writable(LogObject *self, PyObject *args)
-{
- if (!PyArg_ParseTuple(args, ":writable"))
- return NULL;
-
- Py_INCREF(Py_True);
- return Py_True;
-}
-#endif
-
-static PyObject *Log_closed(LogObject *self, void *closure)
-{
- Py_INCREF(Py_False);
- return Py_False;
-}
-
-#if PY_MAJOR_VERSION < 3
-static PyObject *Log_get_softspace(LogObject *self, void *closure)
-{
- return PyInt_FromLong(self->softspace);
-}
-
-static int Log_set_softspace(LogObject *self, PyObject *value)
-{
- int new;
-
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError, "can't delete softspace attribute");
- return -1;
- }
-
- new = PyInt_AsLong(value);
- if (new == -1 && PyErr_Occurred())
- return -1;
-
- self->softspace = new;
-
- return 0;
-}
-
-#else
-
-static PyObject *Log_get_encoding(LogObject *self, void *closure)
-{
- return PyUnicode_FromString("utf-8");
-}
-
-static PyObject *Log_get_errors(LogObject *self, void *closure)
-{
- return PyUnicode_FromString("replace");
-}
-#endif
-
-static PyMethodDef Log_methods[] = {
- { "flush", (PyCFunction)Log_flush, METH_VARARGS, 0 },
- { "close", (PyCFunction)Log_close, METH_VARARGS, 0 },
- { "isatty", (PyCFunction)Log_isatty, METH_VARARGS, 0 },
- { "write", (PyCFunction)Log_write, METH_VARARGS, 0 },
- { "writelines", (PyCFunction)Log_writelines, METH_VARARGS, 0 },
-#if PY_MAJOR_VERSION >= 3
- { "readable", (PyCFunction)Log_readable, METH_VARARGS, 0 },
- { "seekable", (PyCFunction)Log_seekable, METH_VARARGS, 0 },
- { "writable", (PyCFunction)Log_writable, METH_VARARGS, 0 },
-#endif
- { NULL, NULL}
-};
-
-static PyGetSetDef Log_getset[] = {
- { "closed", (getter)Log_closed, NULL, 0 },
-#if PY_MAJOR_VERSION < 3
- { "softspace", (getter)Log_get_softspace, (setter)Log_set_softspace, 0 },
-#else
- { "encoding", (getter)Log_get_encoding, NULL, 0 },
- { "errors", (getter)Log_get_errors, NULL, 0 },
-#endif
- { NULL },
-};
-
-static PyTypeObject Log_Type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "mod_wsgi.Log", /*tp_name*/
- sizeof(LogObject), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- /* methods */
- (destructor)Log_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash*/
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT, /*tp_flags*/
- 0, /*tp_doc*/
- 0, /*tp_traverse*/
- 0, /*tp_clear*/
- 0, /*tp_richcompare*/
- 0, /*tp_weaklistoffset*/
- 0, /*tp_iter*/
- 0, /*tp_iternext*/
- Log_methods, /*tp_methods*/
- 0, /*tp_members*/
- Log_getset, /*tp_getset*/
- 0, /*tp_base*/
- 0, /*tp_dict*/
- 0, /*tp_descr_get*/
- 0, /*tp_descr_set*/
- 0, /*tp_dictoffset*/
- 0, /*tp_init*/
- 0, /*tp_alloc*/
- 0, /*tp_new*/
- 0, /*tp_free*/
- 0, /*tp_is_gc*/
-};
-
-static void wsgi_log_python_error(request_rec *r, PyObject *log,
- const char *filename)
-{
- PyObject *m = NULL;
- PyObject *result = NULL;
-
- PyObject *type = NULL;
- PyObject *value = NULL;
- PyObject *traceback = NULL;
-
- PyObject *xlog = NULL;
-
- if (!PyErr_Occurred())
- return;
-
- if (!log) {
- PyErr_Fetch(&type, &value, &traceback);
-
- xlog = newLogObject(r, APLOG_ERR, NULL);
-
- log = xlog;
-
- PyErr_Restore(type, value, traceback);
-
- type = NULL;
- value = NULL;
- traceback = NULL;
- }
-
- if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
- Py_BEGIN_ALLOW_THREADS
- if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
- "mod_wsgi (pid=%d): SystemExit exception raised by "
- "WSGI script '%s' ignored.", getpid(), filename);
- }
- else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): SystemExit exception raised by "
- "WSGI script '%s' ignored.", getpid(), filename);
- }
- Py_END_ALLOW_THREADS
- }
- else {
- Py_BEGIN_ALLOW_THREADS
- if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
- "mod_wsgi (pid=%d): Exception occurred processing "
- "WSGI script '%s'.", getpid(), filename);
- }
- else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Exception occurred processing "
- "WSGI script '%s'.", getpid(), filename);
- }
- Py_END_ALLOW_THREADS
- }
-
- PyErr_Fetch(&type, &value, &traceback);
- PyErr_NormalizeException(&type, &value, &traceback);
-
- if (!value) {
- value = Py_None;
- Py_INCREF(value);
- }
-
- if (!traceback) {
- traceback = Py_None;
- Py_INCREF(traceback);
- }
-
- m = PyImport_ImportModule("traceback");
-
- if (m) {
- PyObject *d = NULL;
- PyObject *o = NULL;
- d = PyModule_GetDict(m);
- o = PyDict_GetItemString(d, "print_exception");
- if (o) {
- PyObject *args = NULL;
- Py_INCREF(o);
- args = Py_BuildValue("(OOOOO)", type, value, traceback,
- Py_None, log);
- result = PyEval_CallObject(o, args);
- Py_DECREF(args);
- Py_DECREF(o);
- }
- }
-
- if (!result) {
- /*
-         * If we can't output the exception and traceback then
- * use PyErr_Print to dump out details of the
- * exception. For SystemExit though if we do
- * that the process will actually be terminated
-         * so we can only clear the exception information
- * and keep going.
- */
-
- PyErr_Restore(type, value, traceback);
-
- if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
- PyErr_Print();
- PyErr_Clear();
- }
- else {
- PyErr_Clear();
- }
- }
- else {
- Py_XDECREF(type);
- Py_XDECREF(value);
- Py_XDECREF(traceback);
- }
-
- Py_XDECREF(result);
-
- Py_XDECREF(m);
-
- Py_XDECREF(xlog);
-}
-
-typedef struct {
- PyObject_HEAD
request_rec *r;
int init;
int done;
@@ -2156,9 +820,6 @@ static PyObject *Input_close(InputObject *self, PyObject *args)
return NULL;
}
- if (!PyArg_ParseTuple(args, ":close"))
- return NULL;
-
Py_INCREF(Py_None);
return Py_None;
}
@@ -2183,11 +844,20 @@ static PyObject *Input_read(InputObject *self, PyObject *args)
return NULL;
#if defined(MOD_WSGI_WITH_DAEMONS)
- if (wsgi_inactivity_timeout) {
- apr_thread_mutex_lock(wsgi_shutdown_lock);
- wsgi_inactivity_shutdown_time = apr_time_now();
- wsgi_inactivity_shutdown_time += wsgi_inactivity_timeout;
- apr_thread_mutex_unlock(wsgi_shutdown_lock);
+ if (wsgi_idle_timeout || wsgi_busy_timeout) {
+ apr_thread_mutex_lock(wsgi_monitor_lock);
+
+ if (wsgi_idle_timeout) {
+ wsgi_idle_shutdown_time = apr_time_now();
+ wsgi_idle_shutdown_time += wsgi_idle_timeout;
+ }
+
+ if (wsgi_busy_timeout) {
+ wsgi_busy_shutdown_time = apr_time_now();
+ wsgi_busy_shutdown_time += wsgi_busy_timeout;
+ }
+
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
}
#endif
@@ -2798,7 +1468,7 @@ static PyObject *Input_readlines(InputObject *self, PyObject *args)
}
static PyMethodDef Input_methods[] = {
- { "close", (PyCFunction)Input_close, METH_VARARGS, 0 },
+ { "close", (PyCFunction)Input_close, METH_NOARGS, 0 },
{ "read", (PyCFunction)Input_read, METH_VARARGS, 0 },
{ "readline", (PyCFunction)Input_readline, METH_VARARGS, 0 },
{ "readlines", (PyCFunction)Input_readlines, METH_VARARGS, 0 },
@@ -2899,9 +1569,7 @@ typedef struct {
PyObject_HEAD
int result;
request_rec *r;
-#if defined(MOD_WSGI_WITH_BUCKETS)
apr_bucket_brigade *bb;
-#endif
WSGIRequestConfig *config;
InputObject *input;
PyObject *log;
@@ -2916,15 +1584,6 @@ typedef struct {
static PyTypeObject Adapter_Type;
-typedef struct {
- PyObject_HEAD
- AdapterObject *adapter;
- PyObject *filelike;
- apr_size_t blksize;
-} StreamObject;
-
-static PyTypeObject Stream_Type;
-
static AdapterObject *newAdapterObject(request_rec *r)
{
AdapterObject *self;
@@ -2937,9 +1596,7 @@ static AdapterObject *newAdapterObject(request_rec *r)
self->r = r;
-#if defined(MOD_WSGI_WITH_BUCKETS)
self->bb = NULL;
-#endif
self->config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
&wsgi_module);
@@ -2972,56 +1629,31 @@ static void Adapter_dealloc(AdapterObject *self)
static PyObject *Adapter_start_response(AdapterObject *self, PyObject *args)
{
- const char *status = NULL;
- PyObject *headers = NULL;
- PyObject *exc_info = NULL;
+ PyObject *result = NULL;
- PyObject *item = NULL;
- PyObject *latin_item = NULL;
+ PyObject *status_line = NULL;
+ PyObject *headers = NULL;
+ PyObject *exc_info = Py_None;
- char* value = NULL;
+ PyObject *status_line_as_bytes = NULL;
+ PyObject *headers_as_bytes = NULL;
if (!self->r) {
PyErr_SetString(PyExc_RuntimeError, "request object has expired");
return NULL;
}
- if (!PyArg_ParseTuple(args, "OO|O:start_response",
- &item, &headers, &exc_info)) {
- return NULL;
- }
-
-#if PY_MAJOR_VERSION >= 3
- if (PyUnicode_Check(item)) {
- latin_item = PyUnicode_AsLatin1String(item);
- if (!latin_item) {
- PyErr_Format(PyExc_TypeError, "expected byte string object for "
- "status, value containing non 'latin-1' characters "
- "found");
- return NULL;
- }
-
- item = latin_item;
- }
-#endif
-
- if (!PyString_Check(item)) {
- PyErr_Format(PyExc_TypeError, "expected byte string object for "
- "status, value of type %.200s found",
- item->ob_type->tp_name);
- Py_XDECREF(latin_item);
+ if (!PyArg_ParseTuple(args, "OO!|O:start_response",
+ &status_line, &PyList_Type, &headers, &exc_info)) {
return NULL;
}
- status = PyString_AsString(item);
-
- if (!PyList_Check(headers)) {
- PyErr_SetString(PyExc_TypeError, "response headers must be a list");
- Py_XDECREF(latin_item);
+ if (exc_info != Py_None && !PyTuple_Check(exc_info)) {
+ PyErr_SetString(PyExc_RuntimeError, "exception info must be a tuple");
return NULL;
}
- if (exc_info && exc_info != Py_None) {
+ if (exc_info != Py_None) {
if (self->status_line && !self->headers) {
PyObject *type = NULL;
PyObject *value = NULL;
@@ -3029,7 +1661,6 @@ static PyObject *Adapter_start_response(AdapterObject *self, PyObject *args)
if (!PyArg_ParseTuple(exc_info, "OOO", &type,
&value, &traceback)) {
- Py_XDECREF(latin_item);
return NULL;
}
@@ -3039,61 +1670,63 @@ static PyObject *Adapter_start_response(AdapterObject *self, PyObject *args)
PyErr_Restore(type, value, traceback);
- Py_XDECREF(latin_item);
-
return NULL;
}
}
else if (self->status_line && !self->headers) {
PyErr_SetString(PyExc_RuntimeError, "headers have already been sent");
- Py_XDECREF(latin_item);
return NULL;
}
- self->status_line = apr_pstrdup(self->r->pool, status);
+ status_line_as_bytes = wsgi_convert_status_line_to_bytes(status_line);
- value = ap_getword(self->r->pool, &status, ' ');
+ if (!status_line_as_bytes)
+ goto finally;
- errno = 0;
- self->status = strtol(value, &value, 10);
+ headers_as_bytes = wsgi_convert_headers_to_bytes(headers);
- if (*value || errno == ERANGE) {
- PyErr_SetString(PyExc_TypeError, "status value is not an integer");
- Py_XDECREF(latin_item);
- return NULL;
- }
+ if (!headers_as_bytes)
+ goto finally;
- if (!*status) {
- PyErr_SetString(PyExc_ValueError, "status message was not supplied");
- Py_XDECREF(latin_item);
- return NULL;
- }
+ self->status_line = apr_pstrdup(self->r->pool, PyString_AsString(
+ status_line_as_bytes));
+ self->status = strtol(self->status_line, NULL, 10);
Py_XDECREF(self->headers);
+ self->headers = headers_as_bytes;
+ Py_INCREF(headers_as_bytes);
- self->headers = headers;
+ result = PyObject_GetAttrString((PyObject *)self, "write");
- Py_INCREF(self->headers);
-
- Py_XDECREF(latin_item);
+finally:
+ Py_XDECREF(status_line_as_bytes);
+ Py_XDECREF(headers_as_bytes);
- return PyObject_GetAttrString((PyObject *)self, "write");
+ return result;
}
static int Adapter_output(AdapterObject *self, const char *data, int length,
- int exception_when_aborted)
+ PyObject *string_object, int exception_when_aborted)
{
int i = 0;
- int n = 0;
apr_status_t rv;
request_rec *r;
#if defined(MOD_WSGI_WITH_DAEMONS)
- if (wsgi_inactivity_timeout) {
- apr_thread_mutex_lock(wsgi_shutdown_lock);
- wsgi_inactivity_shutdown_time = apr_time_now();
- wsgi_inactivity_shutdown_time += wsgi_inactivity_timeout;
- apr_thread_mutex_unlock(wsgi_shutdown_lock);
+ if (wsgi_idle_timeout || wsgi_busy_timeout) {
+ apr_thread_mutex_lock(wsgi_monitor_lock);
+
+ if (wsgi_idle_timeout) {
+ wsgi_idle_shutdown_time = apr_time_now();
+ wsgi_idle_shutdown_time += wsgi_idle_timeout;
+ }
+
+ if (wsgi_busy_timeout) {
+ wsgi_busy_shutdown_time = apr_time_now();
+ wsgi_busy_shutdown_time += wsgi_busy_timeout;
+ }
+
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
}
#endif
@@ -3126,8 +1759,7 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
* is older.
*/
-#if (AP_SERVER_MAJORVERSION_NUMBER == 1) || \
- (AP_SERVER_MAJORVERSION_NUMBER == 2 && \
+#if (AP_SERVER_MAJORVERSION_NUMBER == 2 && \
AP_SERVER_MINORVERSION_NUMBER < 2) || \
(AP_SERVER_MAJORVERSION_NUMBER == 2 && \
AP_SERVER_MINORVERSION_NUMBER == 2 && \
@@ -3148,7 +1780,13 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
#endif
- /* Now setup response headers in request object. */
+ /*
+     * Now set up the response headers in the request object. We
+ * have already converted any native strings in the
+ * headers to byte strings and validated the format of
+     * the header names and values so we can skip all the error
+ * checking.
+ */
r->status = self->status;
r->status_line = self->status_line;
@@ -3164,82 +1802,13 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
tuple = PyList_GetItem(self->headers, i);
- if (!PyTuple_Check(tuple)) {
- PyErr_Format(PyExc_TypeError, "list of tuple values "
- "expected, value of type %.200s found",
- tuple->ob_type->tp_name);
- return 0;
- }
-
- if (PyTuple_Size(tuple) != 2) {
- PyErr_Format(PyExc_ValueError, "tuple of length 2 "
- "expected, length is %d",
- (int)PyTuple_Size(tuple));
- return 0;
- }
-
object1 = PyTuple_GetItem(tuple, 0);
object2 = PyTuple_GetItem(tuple, 1);
- if (PyString_Check(object1)) {
- name = PyString_AsString(object1);
- }
-#if PY_MAJOR_VERSION >= 3
- else if (PyUnicode_Check(object1)) {
- PyObject *latin_object;
- latin_object = PyUnicode_AsLatin1String(object1);
- if (!latin_object) {
- PyErr_Format(PyExc_TypeError, "header name "
- "contained non 'latin-1' characters ");
- return 0;
- }
-
- name = apr_pstrdup(r->pool, PyString_AsString(latin_object));
- Py_DECREF(latin_object);
- }
-#endif
- else {
- PyErr_Format(PyExc_TypeError, "expected byte string object "
- "for header name, value of type %.200s "
- "found", object1->ob_type->tp_name);
- return 0;
- }
-
- if (PyString_Check(object2)) {
- value = PyString_AsString(object2);
- }
-#if PY_MAJOR_VERSION >= 3
- else if (PyUnicode_Check(object2)) {
- PyObject *latin_object;
- latin_object = PyUnicode_AsLatin1String(object2);
- if (!latin_object) {
- PyErr_Format(PyExc_TypeError, "header value "
- "contained non 'latin-1' characters ");
- return 0;
- }
-
- value = apr_pstrdup(r->pool, PyString_AsString(latin_object));
- Py_DECREF(latin_object);
- }
-#endif
- else {
- PyErr_Format(PyExc_TypeError, "expected byte string object "
- "for header value, value of type %.200s "
- "found", object2->ob_type->tp_name);
- return 0;
- }
-
- if (strchr(name, '\n') != 0 || strchr(value, '\n') != 0) {
- PyErr_Format(PyExc_ValueError, "embedded newline in "
- "response header with name '%s' and value '%s'",
- name, value);
- return 0;
- }
+ name = PyBytes_AsString(object1);
+ value = PyBytes_AsString(object2);
if (!strcasecmp(name, "Content-Type")) {
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- r->content_type = apr_pstrdup(r->pool, value);
-#else
/*
* In a daemon child process we cannot call the
* function ap_set_content_type() as want to
@@ -3253,7 +1822,6 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
r->content_type = apr_pstrdup(r->pool, value);
else
ap_set_content_type(r, apr_pstrdup(r->pool, value));
-#endif
}
else if (!strcasecmp(name, "Content-Length")) {
char *v = value;
@@ -3280,12 +1848,6 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
}
}
- /* Need to force output of headers when using Apache 1.3. */
-
- Py_BEGIN_ALLOW_THREADS
- ap_send_http_header(r);
- Py_END_ALLOW_THREADS
-
/*
* Reset flag indicating whether '100 Continue' response
* expected. If we don't do this then if an attempt to read
@@ -3330,7 +1892,6 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
/* Now output any data. */
if (length) {
-#if defined(MOD_WSGI_WITH_BUCKETS)
apr_bucket *b;
/*
@@ -3349,7 +1910,7 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
if (r->connection->aborted) {
if (!exception_when_aborted) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r,
"mod_wsgi (pid=%d): Client closed connection.",
getpid());
}
@@ -3364,8 +1925,20 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
r->connection->bucket_alloc);
}
- b = apr_bucket_transient_create(data, length,
- r->connection->bucket_alloc);
+#if 0
+ if (string_object) {
+ b = wsgi_apr_bucket_python_create(data, length,
+ self->config->application_group, string_object,
+ r->connection->bucket_alloc);
+ }
+ else {
+#endif
+ b = apr_bucket_transient_create(data, length,
+ r->connection->bucket_alloc);
+#if 0
+ }
+#endif
+
APR_BRIGADE_INSERT_TAIL(self->bb, b);
b = apr_bucket_flush_create(r->connection->bucket_alloc);
@@ -3383,32 +1956,6 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
Py_BEGIN_ALLOW_THREADS
apr_brigade_cleanup(self->bb);
Py_END_ALLOW_THREADS
-#else
- /*
- * In Apache 1.3, the bucket brigade system doesn't exist,
- * so have no choice but to use ap_rwrite()/ap_rflush().
- * It is not believed that Apache 1.3 suffers the memory
- * accumulation problem when streaming lots of data.
- */
-
- Py_BEGIN_ALLOW_THREADS
- n = ap_rwrite(data, length, r);
- Py_END_ALLOW_THREADS
-
- if (n == -1) {
- PyErr_SetString(PyExc_IOError, "failed to write data");
- return 0;
- }
-
- Py_BEGIN_ALLOW_THREADS
- n = ap_rflush(r);
- Py_END_ALLOW_THREADS
-
- if (n == -1) {
- PyErr_SetString(PyExc_IOError, "failed to flush data");
- return 0;
- }
-#endif
}
/*
@@ -3424,7 +1971,7 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
if (r->connection->aborted) {
if (!exception_when_aborted) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r,
"mod_wsgi (pid=%d): Client closed connection.",
getpid());
}
@@ -3437,8 +1984,6 @@ static int Adapter_output(AdapterObject *self, const char *data, int length,
return 1;
}
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
-
/* Split buckets at 1GB when sending large files. */
#define MAX_BUCKET_SIZE (0x40000000)
@@ -3517,11 +2062,7 @@ static int Adapter_output_file(AdapterObject *self, apr_file_t* tmpfile,
return 1;
}
-#endif
-
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
static APR_OPTIONAL_FN_TYPE(ssl_is_https) *wsgi_is_https = NULL;
-#endif
static PyObject *Adapter_environ(AdapterObject *self)
{
@@ -3656,14 +2197,13 @@ static PyObject *Adapter_environ(AdapterObject *self)
/* Setup file wrapper object for efficient file responses. */
- object = PyObject_GetAttrString((PyObject *)self, "file_wrapper");
- PyDict_SetItemString(vars, "wsgi.file_wrapper", object);
- Py_DECREF(object);
+ PyDict_SetItemString(vars, "wsgi.file_wrapper", (PyObject *)&Stream_Type);
/* Add mod_wsgi version information. */
- object = Py_BuildValue("(ii)", MOD_WSGI_MAJORVERSION_NUMBER,
- MOD_WSGI_MINORVERSION_NUMBER);
+ object = Py_BuildValue("(iii)", MOD_WSGI_MAJORVERSION_NUMBER,
+ MOD_WSGI_MINORVERSION_NUMBER,
+ MOD_WSGI_MICROVERSION_NUMBER);
PyDict_SetItemString(vars, "mod_wsgi.version", object);
Py_DECREF(object);
@@ -3674,7 +2214,8 @@ static PyObject *Adapter_environ(AdapterObject *self)
*/
if (!wsgi_daemon_pool && self->config->pass_apache_request) {
-#if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2
+#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || \
+ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 7)
object = PyCapsule_New(self->r, 0, 0);
#else
object = PyCObject_FromVoidPtr(self->r, 0);
@@ -3689,7 +2230,6 @@ static PyObject *Adapter_environ(AdapterObject *self)
*/
#if 0
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
if (!wsgi_daemon_pool) {
object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https");
PyDict_SetItemString(vars, "mod_ssl.is_https", object);
@@ -3700,7 +2240,6 @@ static PyObject *Adapter_environ(AdapterObject *self)
Py_DECREF(object);
}
#endif
-#endif
return vars;
}
@@ -3710,8 +2249,6 @@ static int Adapter_process_file_wrapper(AdapterObject *self)
int done = 0;
#ifndef WIN32
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
-
PyObject *filelike = NULL;
PyObject *method = NULL;
PyObject *object = NULL;
@@ -3729,7 +2266,7 @@ static int Adapter_process_file_wrapper(AdapterObject *self)
/* Perform file wrapper optimisations where possible. */
- if (self->sequence->ob_type != &Stream_Type)
+ if (!PyObject_IsInstance(self->sequence, (PyObject *)&Stream_Type))
return 0;
/*
@@ -3758,14 +2295,24 @@ static int Adapter_process_file_wrapper(AdapterObject *self)
* iterable value.
*/
- filelike = ((StreamObject *)self->sequence)->filelike;
+
+ filelike = PyObject_GetAttrString((PyObject *)self->sequence, "filelike");
+
+ if (!filelike) {
+ PyErr_SetString(PyExc_KeyError,
+ "file wrapper no filelike attribute");
+ return 0;
+ }
fd = PyObject_AsFileDescriptor(filelike);
if (fd == -1) {
PyErr_Clear();
+ Py_DECREF(filelike);
return 0;
}
+ Py_DECREF(filelike);
+
/*
* On some platforms, such as Linux, sendfile() system call
* will not work on UNIX sockets. Thus when using daemon mode
@@ -3815,28 +2362,28 @@ static int Adapter_process_file_wrapper(AdapterObject *self)
return 0;
}
- if (PyLong_Check(object)) {
+ if (PyLong_Check(object)) {
#if defined(HAVE_LONG_LONG)
- fo_offset = PyLong_AsLongLong(object);
+ fo_offset = PyLong_AsLongLong(object);
#else
- fo_offset = PyLong_AsLong(object);
+ fo_offset = PyLong_AsLong(object);
#endif
- }
+ }
#if PY_MAJOR_VERSION < 3
- else if (PyInt_Check(object)) {
- fo_offset = PyInt_AsLong(object);
- }
+ else if (PyInt_Check(object)) {
+ fo_offset = PyInt_AsLong(object);
+ }
#endif
- else {
- Py_DECREF(object);
- return 0;
- }
+ else {
+ Py_DECREF(object);
+ return 0;
+ }
- if (PyErr_Occurred()){
- Py_DECREF(object);
- PyErr_Clear();
- return 0;
- }
+ if (PyErr_Occurred()){
+ Py_DECREF(object);
+ PyErr_Clear();
+ return 0;
+ }
Py_DECREF(object);
@@ -3858,7 +2405,7 @@ static int Adapter_process_file_wrapper(AdapterObject *self)
* logged later.
*/
- if (!Adapter_output(self, "", 0, 0))
+ if (!Adapter_output(self, "", 0, NULL, 0))
return 1;
/*
@@ -3906,7 +2453,6 @@ static int Adapter_process_file_wrapper(AdapterObject *self)
apr_file_seek(tmpfile, APR_SET, &fd_offset);
#endif
-#endif
return done;
}
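
Note: the file wrapper hunks above now fetch the wrapped object through its "filelike" attribute and then probe it for a real file descriptor and offset before attempting an OS-level file copy. A minimal sketch of that probe, assuming only that "filelike" is a Python object with fileno() and tell(); this is illustrative, not the module's actual code:

    #include <Python.h>

    /* Return 0 and fill in fd/offset if the object is backed by a real
     * file descriptor, otherwise return -1 so the caller falls back to
     * plain iteration over the response. */
    static int query_filelike(PyObject *filelike, int *fd, long *offset)
    {
        PyObject *result;

        *fd = PyObject_AsFileDescriptor(filelike);

        if (*fd == -1) {
            PyErr_Clear();
            return -1;
        }

        result = PyObject_CallMethod(filelike, "tell", NULL);

        if (!result) {
            PyErr_Clear();
            return -1;
        }

        *offset = PyLong_AsLong(result);
        Py_DECREF(result);

        return PyErr_Occurred() ? -1 : 0;
    }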
@@ -3919,18 +2465,57 @@ static int Adapter_run(AdapterObject *self, PyObject *object)
PyObject *iterator = NULL;
PyObject *close = NULL;
+ PyObject *wrapper = NULL;
+
const char *msg = NULL;
int length = 0;
#if defined(MOD_WSGI_WITH_DAEMONS)
- if (wsgi_inactivity_timeout) {
- apr_thread_mutex_lock(wsgi_shutdown_lock);
- wsgi_inactivity_shutdown_time = apr_time_now();
- wsgi_inactivity_shutdown_time += wsgi_inactivity_timeout;
- apr_thread_mutex_unlock(wsgi_shutdown_lock);
+ if (wsgi_idle_timeout || wsgi_busy_timeout) {
+ apr_thread_mutex_lock(wsgi_monitor_lock);
+
+ if (wsgi_idle_timeout) {
+ wsgi_idle_shutdown_time = apr_time_now();
+ wsgi_idle_shutdown_time += wsgi_idle_timeout;
+ }
+
+ if (wsgi_busy_timeout) {
+ wsgi_busy_shutdown_time = apr_time_now();
+ wsgi_busy_shutdown_time += wsgi_busy_timeout;
+ }
+
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
}
#endif
+ if (wsgi_newrelic_config_file) {
+ PyObject *module = NULL;
+
+ module = PyImport_ImportModule("newrelic.api.web_transaction");
+
+ if (module) {
+ PyObject *dict;
+ PyObject *factory;
+
+ dict = PyModule_GetDict(module);
+ factory = PyDict_GetItemString(dict, "WSGIApplicationWrapper");
+
+ if (factory) {
+ Py_INCREF(factory);
+
+ wrapper = PyObject_CallFunctionObjArgs(
+ factory, object, Py_None, NULL);
+
+ Py_DECREF(factory);
+ }
+
+ Py_DECREF(module);
+ }
+ }
+
+ if (wrapper)
+ object = wrapper;
+
vars = Adapter_environ(self);
start = PyObject_GetAttrString((PyObject *)self, "start_response");
@@ -3949,24 +2534,6 @@ static int Adapter_run(AdapterObject *self, PyObject *object)
PyObject *item = NULL;
while ((item = PyIter_Next(iterator))) {
-#if PY_MAJOR_VERSION >= 3
- if (PyUnicode_Check(item)) {
- PyObject *latin_item;
- latin_item = PyUnicode_AsLatin1String(item);
- if (!latin_item) {
- PyErr_Format(PyExc_TypeError, "sequence of "
- "byte string values expected, value "
- "containing non 'latin-1' characters "
- "found");
- Py_DECREF(item);
- break;
- }
-
- Py_DECREF(item);
- item = latin_item;
- }
-#endif
-
if (!PyString_Check(item)) {
PyErr_Format(PyExc_TypeError, "sequence of byte "
"string values expected, value of "
@@ -3984,7 +2551,8 @@ static int Adapter_run(AdapterObject *self, PyObject *object)
break;
}
- if (length && !Adapter_output(self, msg, length, 0)) {
+ if (length && !Adapter_output(self, msg, length,
+ item, 0)) {
if (!PyErr_Occurred())
aborted = 1;
Py_DECREF(item);
@@ -3996,7 +2564,7 @@ static int Adapter_run(AdapterObject *self, PyObject *object)
}
if (!PyErr_Occurred() && !aborted) {
- if (Adapter_output(self, "", 0, 0))
+ if (Adapter_output(self, "", 0, NULL, 0))
self->result = OK;
}
@@ -4012,7 +2580,7 @@ static int Adapter_run(AdapterObject *self, PyObject *object)
if (self->content_length_set && ((!PyErr_Occurred() &&
self->output_length != self->content_length) ||
(self->output_length > self->content_length))) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r,
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r,
"mod_wsgi (pid=%d): Content length mismatch, "
"expected %s, response generated %s: %s", getpid(),
apr_off_t_toa(self->r->pool, self->content_length),
@@ -4050,19 +2618,8 @@ static int Adapter_run(AdapterObject *self, PyObject *object)
if (PyErr_Occurred())
wsgi_log_python_error(self->r, self->log, self->r->filename);
-
- Py_DECREF(self->sequence);
-
- self->sequence = NULL;
}
-
- Py_DECREF(args);
- Py_DECREF(start);
- Py_DECREF(vars);
-
- /* Log details of any final Python exceptions. */
-
- if (PyErr_Occurred())
+ else
wsgi_log_python_error(self->r, self->log, self->r->filename);
/*
@@ -4075,6 +2632,15 @@ static int Adapter_run(AdapterObject *self, PyObject *object)
if (self->result == HTTP_INTERNAL_SERVER_ERROR)
self->r->status_line = "500 Internal Server Error";
+ Py_DECREF(args);
+ Py_DECREF(start);
+ Py_DECREF(vars);
+
+ Py_XDECREF(wrapper);
+
+ Py_XDECREF(self->sequence);
+ self->sequence = NULL;
+
return self->result;
}
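
Note: the Adapter_run() changes above wrap the WSGI application object when a New Relic configuration file is set, using the import-then-call pattern that recurs throughout this file. A minimal sketch of that pattern, with reduced error handling and a fallback to the unwrapped application; the module and attribute names are the ones named in the hunk, the helper itself is illustrative:

    #include <Python.h>

    static PyObject *wrap_application(PyObject *application)
    {
        PyObject *module;
        PyObject *wrapper = NULL;

        module = PyImport_ImportModule("newrelic.api.web_transaction");

        if (module) {
            /* Both the module dict and the dict entry are borrowed. */
            PyObject *dict = PyModule_GetDict(module);
            PyObject *factory = PyDict_GetItemString(dict,
                                                     "WSGIApplicationWrapper");

            if (factory) {
                Py_INCREF(factory);
                wrapper = PyObject_CallFunctionObjArgs(factory, application,
                                                       Py_None, NULL);
                Py_DECREF(factory);
            }

            Py_DECREF(module);
        }

        /* On any failure, keep serving the unwrapped application. */
        if (!wrapper) {
            PyErr_Clear();
            Py_INCREF(application);
            wrapper = application;
        }

        return wrapper;
    }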
@@ -4085,6 +2651,8 @@ static PyObject *Adapter_write(AdapterObject *self, PyObject *args)
const char *data = NULL;
int length = 0;
+ /* XXX The use of latin_item here looks very broken. */
+
if (!self->r) {
PyErr_SetString(PyExc_RuntimeError, "request object has expired");
return NULL;
@@ -4093,20 +2661,6 @@ static PyObject *Adapter_write(AdapterObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "O:write", &item))
return NULL;
-#if PY_MAJOR_VERSION >= 3
- if (PyUnicode_Check(item)) {
- latin_item = PyUnicode_AsLatin1String(item);
- if (!latin_item) {
- PyErr_Format(PyExc_TypeError, "byte string value expected, "
- "value containing non 'latin-1' characters found");
- return NULL;
- }
-
- Py_DECREF(item);
- item = latin_item;
- }
-#endif
-
if (!PyString_Check(item)) {
PyErr_Format(PyExc_TypeError, "byte string value expected, value "
"of type %.200s found", item->ob_type->tp_name);
@@ -4117,7 +2671,7 @@ static PyObject *Adapter_write(AdapterObject *self, PyObject *args)
data = PyString_AsString(item);
length = PyString_Size(item);
- if (!Adapter_output(self, data, length, 1)) {
+ if (!Adapter_output(self, data, length, item, 1)) {
Py_XDECREF(latin_item);
return NULL;
}
@@ -4128,28 +2682,6 @@ static PyObject *Adapter_write(AdapterObject *self, PyObject *args)
return Py_None;
}
-static PyObject *newStreamObject(AdapterObject *adapter, PyObject *filelike,
- apr_size_t blksize);
-
-static PyObject *Adapter_file_wrapper(AdapterObject *self, PyObject *args)
-{
- PyObject *filelike = NULL;
- apr_size_t blksize = HUGE_STRING_LEN;
- PyObject *result = NULL;
-
- if (!self->r) {
- PyErr_SetString(PyExc_RuntimeError, "request object has expired");
- return NULL;
- }
-
- if (!PyArg_ParseTuple(args, "O|l:file_wrapper", &filelike, &blksize))
- return NULL;
-
- return newStreamObject(self, filelike, blksize);
-}
-
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
-
static PyObject *Adapter_ssl_is_https(AdapterObject *self, PyObject *args)
{
APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0;
@@ -4237,16 +2769,11 @@ static PyObject *Adapter_ssl_var_lookup(AdapterObject *self, PyObject *args)
#endif
}
-#endif
-
static PyMethodDef Adapter_methods[] = {
{ "start_response", (PyCFunction)Adapter_start_response, METH_VARARGS, 0 },
{ "write", (PyCFunction)Adapter_write, METH_VARARGS, 0 },
- { "file_wrapper", (PyCFunction)Adapter_file_wrapper, METH_VARARGS, 0 },
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
{ "ssl_is_https", (PyCFunction)Adapter_ssl_is_https, METH_VARARGS, 0 },
{ "ssl_var_lookup", (PyCFunction)Adapter_ssl_var_lookup, METH_VARARGS, 0 },
-#endif
{ NULL, NULL}
};
@@ -4294,1987 +2821,6 @@ static PyTypeObject Adapter_Type = {
0, /*tp_is_gc*/
};
-static PyObject *newStreamObject(AdapterObject *adapter, PyObject *filelike,
- apr_size_t blksize)
-{
- StreamObject *self;
-
- self = PyObject_New(StreamObject, &Stream_Type);
- if (self == NULL)
- return NULL;
-
- self->adapter = adapter;
- self->filelike = filelike;
- self->blksize = blksize;
-
- Py_INCREF(self->adapter);
- Py_INCREF(self->filelike);
-
- return (PyObject *)self;
-}
-
-static void Stream_dealloc(StreamObject *self)
-{
- Py_DECREF(self->filelike);
- Py_DECREF(self->adapter);
-
- PyObject_Del(self);
-}
-
-static PyObject *Stream_iter(StreamObject *self)
-{
- if (!self->adapter->r) {
- PyErr_SetString(PyExc_RuntimeError, "request object has expired");
- return NULL;
- }
-
- Py_INCREF(self);
- return (PyObject *)self;
-}
-
-static PyObject *Stream_iternext(StreamObject *self)
-{
- PyObject *method = NULL;
- PyObject *args = NULL;
- PyObject *result = NULL;
-
- if (!self->adapter->r) {
- PyErr_SetString(PyExc_RuntimeError, "request object has expired");
- return NULL;
- }
-
- method = PyObject_GetAttrString(self->filelike, "read");
-
- if (!method) {
- PyErr_SetString(PyExc_KeyError,
- "file like object has no read() method");
- return 0;
- }
-
- args = Py_BuildValue("(l)", self->blksize);
- result = PyEval_CallObject(method, args);
-
- Py_DECREF(method);
- Py_DECREF(args);
-
- if (!result)
- return 0;
-
- if (PyString_Check(result)) {
- if (PyString_Size(result) == 0) {
- PyErr_SetObject(PyExc_StopIteration, Py_None);
- Py_DECREF(result);
- return 0;
- }
-
- return result;
- }
-
-#if PY_MAJOR_VERSION >= 3
- if (PyUnicode_Check(result)) {
- if (PyUnicode_GetSize(result) == 0) {
- PyErr_SetObject(PyExc_StopIteration, Py_None);
- Py_DECREF(result);
- return 0;
- }
-
- return result;
- }
-#endif
-
- Py_DECREF(result);
-
- PyErr_SetString(PyExc_TypeError,
- "file like object yielded non string type");
-
- return 0;
-}
-
-static PyObject *Stream_close(StreamObject *self, PyObject *args)
-{
- PyObject *method = NULL;
- PyObject *result = NULL;
-
- method = PyObject_GetAttrString(self->filelike, "close");
-
- if (method) {
- result = PyEval_CallObject(method, (PyObject *)NULL);
- if (!result)
- PyErr_Clear();
- Py_DECREF(method);
- }
-
- Py_XDECREF(result);
-
- Py_INCREF(Py_None);
- return Py_None;
-}
-
-static PyMethodDef Stream_methods[] = {
- { "close", (PyCFunction)Stream_close, METH_VARARGS, 0 },
- { NULL, NULL}
-};
-
-static PyTypeObject Stream_Type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "mod_wsgi.Stream", /*tp_name*/
- sizeof(StreamObject), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- /* methods */
- (destructor)Stream_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash*/
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
-#if defined(Py_TPFLAGS_HAVE_ITER)
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, /*tp_flags*/
-#else
- Py_TPFLAGS_DEFAULT, /*tp_flags*/
-#endif
- 0, /*tp_doc*/
- 0, /*tp_traverse*/
- 0, /*tp_clear*/
- 0, /*tp_richcompare*/
- 0, /*tp_weaklistoffset*/
- (getiterfunc)Stream_iter, /*tp_iter*/
- (iternextfunc)Stream_iternext, /*tp_iternext*/
- Stream_methods, /*tp_methods*/
- 0, /*tp_members*/
- 0, /*tp_getset*/
- 0, /*tp_base*/
- 0, /*tp_dict*/
- 0, /*tp_descr_get*/
- 0, /*tp_descr_set*/
- 0, /*tp_dictoffset*/
- 0, /*tp_init*/
- 0, /*tp_alloc*/
- 0, /*tp_new*/
- 0, /*tp_free*/
- 0, /*tp_is_gc*/
-};
-
-/* Restricted object to stop access to STDIN/STDOUT. */
-
-typedef struct {
- PyObject_HEAD
- const char *s;
-} RestrictedObject;
-
-static PyTypeObject Restricted_Type;
-
-static RestrictedObject *newRestrictedObject(const char *s)
-{
- RestrictedObject *self;
-
- self = PyObject_New(RestrictedObject, &Restricted_Type);
- if (self == NULL)
- return NULL;
-
- self->s = s;
-
- return self;
-}
-
-static void Restricted_dealloc(RestrictedObject *self)
-{
- PyObject_Del(self);
-}
-
-static PyObject *Restricted_getattr(RestrictedObject *self, char *name)
-{
- PyErr_Format(PyExc_IOError, "%s access restricted by mod_wsgi", self->s);
-
- return NULL;
-}
-
-static PyTypeObject Restricted_Type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "mod_wsgi.Restricted", /*tp_name*/
- sizeof(RestrictedObject), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- /* methods */
- (destructor)Restricted_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- (getattrfunc)Restricted_getattr, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash*/
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT, /*tp_flags*/
- 0, /*tp_doc*/
- 0, /*tp_traverse*/
- 0, /*tp_clear*/
- 0, /*tp_richcompare*/
- 0, /*tp_weaklistoffset*/
- 0, /*tp_iter*/
- 0, /*tp_iternext*/
- 0, /*tp_methods*/
- 0, /*tp_members*/
- 0, /*tp_getset*/
- 0, /*tp_base*/
- 0, /*tp_dict*/
- 0, /*tp_descr_get*/
- 0, /*tp_descr_set*/
- 0, /*tp_dictoffset*/
- 0, /*tp_init*/
- 0, /*tp_alloc*/
- 0, /*tp_new*/
- 0, /*tp_free*/
- 0, /*tp_is_gc*/
-};
-
-/* Function to restrict access to use of signal(). */
-
-static PyObject *wsgi_signal_intercept(PyObject *self, PyObject *args)
-{
- PyObject *h = NULL;
- int n = 0;
-
- PyObject *m = NULL;
-
- if (!PyArg_ParseTuple(args, "iO:signal", &n, &h))
- return NULL;
-
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server,
- "mod_wsgi (pid=%d): Callback registration for "
- "signal %d ignored.", getpid(), n);
- Py_END_ALLOW_THREADS
-
- m = PyImport_ImportModule("traceback");
-
- if (m) {
- PyObject *d = NULL;
- PyObject *o = NULL;
- d = PyModule_GetDict(m);
- o = PyDict_GetItemString(d, "print_stack");
- if (o) {
- PyObject *log = NULL;
- PyObject *args = NULL;
- PyObject *result = NULL;
- Py_INCREF(o);
- log = newLogObject(NULL, APLOG_WARNING, NULL);
- args = Py_BuildValue("(OOO)", Py_None, Py_None, log);
- result = PyEval_CallObject(o, args);
- Py_XDECREF(result);
- Py_DECREF(args);
- Py_DECREF(log);
- Py_DECREF(o);
- }
- }
-
- Py_XDECREF(m);
-
- Py_INCREF(h);
-
- return h;
-}
-
-static PyMethodDef wsgi_signal_method[] = {
- { "signal", (PyCFunction)wsgi_signal_intercept, METH_VARARGS, 0 },
- { NULL, NULL }
-};
-
-/* Wrapper around Python interpreter instances. */
-
-static const char *wsgi_python_path = NULL;
-static const char *wsgi_python_eggs = NULL;
-
-#if APR_HAS_THREADS
-static int wsgi_thread_count = 0;
-static apr_threadkey_t *wsgi_thread_key;
-#endif
-
-typedef struct {
- PyObject_HEAD
- char *name;
- PyInterpreterState *interp;
- int owner;
-#if APR_HAS_THREADS
- apr_hash_t *tstate_table;
-#else
- PyThreadState *tstate;
-#endif
-} InterpreterObject;
-
-static PyTypeObject Interpreter_Type;
-
-static InterpreterObject *newInterpreterObject(const char *name)
-{
- PyInterpreterState *interp = NULL;
- InterpreterObject *self = NULL;
- PyThreadState *tstate = NULL;
- PyThreadState *save_tstate = NULL;
- PyObject *module = NULL;
- PyObject *object = NULL;
- PyObject *item = NULL;
-
- /* Create handle for interpreter and local data. */
-
- self = PyObject_New(InterpreterObject, &Interpreter_Type);
- if (self == NULL)
- return NULL;
-
- /*
- * If interpreter not named, then we want to bind
- * to the first Python interpreter instance created.
- * Give this interpreter an empty string as name.
- */
-
- if (!name) {
- interp = PyInterpreterState_Head();
- while (interp->next)
- interp = interp->next;
-
- name = "";
- }
-
- /* Save away the interpreter name. */
-
- self->name = strdup(name);
-
- if (interp) {
- /*
- * Interpreter provided to us so will not be
- * responsible for deleting it later. This will
- * be the case for the main Python interpreter.
- */
-
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Attach interpreter '%s'.",
- getpid(), name);
-
- self->interp = interp;
- self->owner = 0;
- }
- else {
- /*
- * Remember active thread state so can restore
- * it. This is actually the thread state
- * associated with simplified GIL state API.
- */
-
- save_tstate = PyThreadState_Swap(NULL);
-
- /*
- * Create the interpreter. If creation of the
- * interpreter fails it will restore the
- * existing active thread state for us so don't
- * need to worry about it in that case.
- */
-
- tstate = Py_NewInterpreter();
-
- if (!tstate) {
- PyErr_SetString(PyExc_RuntimeError, "Py_NewInterpreter() failed");
-
- Py_DECREF(self);
-
- return NULL;
- }
-
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Create interpreter '%s'.",
- getpid(), name);
- Py_END_ALLOW_THREADS
-
- self->interp = tstate->interp;
- self->owner = 1;
- }
-
- /*
- * Install restricted objects for STDIN and STDOUT,
- * or log object for STDOUT as appropriate. Don't do
- * this if not running on Win32 and we believe we
- * are running in single process mode, otherwise
- * it prevents use of interactive debuggers such as
- * the 'pdb' module.
- */
-
- object = newLogObject(NULL, APLOG_ERR, "stderr");
- PySys_SetObject("stderr", object);
- Py_DECREF(object);
-
-#ifndef WIN32
- if (wsgi_parent_pid != getpid()) {
-#endif
- if (wsgi_server_config->restrict_stdout == 1) {
- object = (PyObject *)newRestrictedObject("sys.stdout");
- PySys_SetObject("stdout", object);
- Py_DECREF(object);
- }
- else {
- object = newLogObject(NULL, APLOG_ERR, "stdout");
- PySys_SetObject("stdout", object);
- Py_DECREF(object);
- }
-
- if (wsgi_server_config->restrict_stdin == 1) {
- object = (PyObject *)newRestrictedObject("sys.stdin");
- PySys_SetObject("stdin", object);
- Py_DECREF(object);
- }
-#ifndef WIN32
- }
-#endif
-
- /*
- * Set sys.argv to one element list to fake out
- * modules that look there for Python command
- * line arguments as appropriate.
- */
-
- object = PyList_New(0);
-#if PY_MAJOR_VERSION >= 3
- item = PyUnicode_FromString("mod_wsgi");
-#else
- item = PyString_FromString("mod_wsgi");
-#endif
- PyList_Append(object, item);
- PySys_SetObject("argv", object);
- Py_DECREF(item);
- Py_DECREF(object);
-
- /*
- * Install intercept for signal handler registration
- * if appropriate.
- */
-
- if (wsgi_server_config->restrict_signal != 0) {
- module = PyImport_ImportModule("signal");
- PyModule_AddObject(module, "signal", PyCFunction_New(
- &wsgi_signal_method[0], NULL));
- Py_DECREF(module);
- }
-
- /*
- * Force loading of codecs into interpreter. This has to be
- * done as not otherwise done in sub interpreters and if not
- * done, code running in sub interpreters can fail on some
- * platforms if a unicode string is added in sys.path and an
- * import then done.
- */
-
- item = PyCodec_Encoder("ascii");
- Py_XDECREF(item);
-
- /*
- * If running in daemon process, override as appropriate
- * the USER, USERNAME or LOGNAME environment variables
- * so that they match the user that the process is running
- * as. Need to do this else we inherit the value from the
- * Apache parent process which is likely wrong as will be
- * root or the user than ran sudo when Apache started.
- * Can't update these for normal Apache child processes
- * as that would change the expected environment of other
- * Apache modules.
- */
-
-#ifndef WIN32
- if (wsgi_daemon_pool) {
- module = PyImport_ImportModule("os");
-
- if (module) {
- PyObject *dict = NULL;
- PyObject *key = NULL;
- PyObject *value = NULL;
-
- dict = PyModule_GetDict(module);
- object = PyDict_GetItemString(dict, "environ");
-
- if (object) {
- struct passwd *pwent;
-
- pwent = getpwuid(geteuid());
-
- if (getenv("USER")) {
-#if PY_MAJOR_VERSION >= 3
- key = PyUnicode_FromString("USER");
- value = PyUnicode_Decode(pwent->pw_name,
- strlen(pwent->pw_name),
- Py_FileSystemDefaultEncoding,
- "surrogateescape");
-#else
- key = PyString_FromString("USER");
- value = PyString_FromString(pwent->pw_name);
-#endif
-
- PyObject_SetItem(object, key, value);
-
- Py_DECREF(key);
- Py_DECREF(value);
- }
-
- if (getenv("USERNAME")) {
-#if PY_MAJOR_VERSION >= 3
- key = PyUnicode_FromString("USERNAME");
- value = PyUnicode_Decode(pwent->pw_name,
- strlen(pwent->pw_name),
- Py_FileSystemDefaultEncoding,
- "surrogateescape");
-#else
- key = PyString_FromString("USERNAME");
- value = PyString_FromString(pwent->pw_name);
-#endif
-
- PyObject_SetItem(object, key, value);
-
- Py_DECREF(key);
- Py_DECREF(value);
- }
-
- if (getenv("LOGNAME")) {
-#if PY_MAJOR_VERSION >= 3
- key = PyUnicode_FromString("LOGNAME");
- value = PyUnicode_Decode(pwent->pw_name,
- strlen(pwent->pw_name),
- Py_FileSystemDefaultEncoding,
- "surrogateescape");
-#else
- key = PyString_FromString("LOGNAME");
- value = PyString_FromString(pwent->pw_name);
-#endif
-
- PyObject_SetItem(object, key, value);
-
- Py_DECREF(key);
- Py_DECREF(value);
- }
- }
-
- Py_DECREF(module);
- }
- }
-#endif
-
- /*
- * If running in daemon process, override HOME environment
- * variable so that is matches the home directory of the
- * user that the process is running as. Need to do this as
- * Apache will inherit HOME from root user or user that ran
- * sudo and started Apache and this would be wrong. Can't
- * update HOME for normal Apache child processes as that
- * would change the expected environment of other Apache
- * modules.
- */
-
-#ifndef WIN32
- if (wsgi_daemon_pool) {
- module = PyImport_ImportModule("os");
-
- if (module) {
- PyObject *dict = NULL;
- PyObject *key = NULL;
- PyObject *value = NULL;
-
- dict = PyModule_GetDict(module);
- object = PyDict_GetItemString(dict, "environ");
-
- if (object) {
- struct passwd *pwent;
-
- pwent = getpwuid(geteuid());
-#if PY_MAJOR_VERSION >= 3
- key = PyUnicode_FromString("HOME");
- value = PyUnicode_Decode(pwent->pw_dir, strlen(pwent->pw_dir),
- Py_FileSystemDefaultEncoding,
- "surrogateescape");
-#else
- key = PyString_FromString("HOME");
- value = PyString_FromString(pwent->pw_dir);
-#endif
-
- PyObject_SetItem(object, key, value);
-
- Py_DECREF(key);
- Py_DECREF(value);
- }
-
- Py_DECREF(module);
- }
- }
-#endif
-
- /*
- * Explicitly override the PYTHON_EGG_CACHE variable if it
- * was defined by Apache configuration. For embedded processes
- * this would have been done by using WSGIPythonEggs directive.
- * For daemon processes the 'python-eggs' option to the
- * WSGIDaemonProcess directive would have needed to be used.
- */
-
- if (!wsgi_daemon_pool)
- wsgi_python_eggs = wsgi_server_config->python_eggs;
-
- if (wsgi_python_eggs) {
- module = PyImport_ImportModule("os");
-
- if (module) {
- PyObject *dict = NULL;
- PyObject *key = NULL;
- PyObject *value = NULL;
-
- dict = PyModule_GetDict(module);
- object = PyDict_GetItemString(dict, "environ");
-
- if (object) {
-#if PY_MAJOR_VERSION >= 3
- key = PyUnicode_FromString("PYTHON_EGG_CACHE");
- value = PyUnicode_Decode(wsgi_python_eggs,
- strlen(wsgi_python_eggs),
- Py_FileSystemDefaultEncoding,
- "surrogateescape");
-#else
- key = PyString_FromString("PYTHON_EGG_CACHE");
- value = PyString_FromString(wsgi_python_eggs);
-#endif
-
- PyObject_SetItem(object, key, value);
-
- Py_DECREF(key);
- Py_DECREF(value);
- }
-
- Py_DECREF(module);
- }
- }
-
- /*
- * Install user defined Python module search path. This is
- * added using site.addsitedir() so that any Python .pth
- * files are opened and additional directories so defined
- * are added to default Python search path as well. This
- * allows virtual Python environments to work. Note that
- * site.addsitedir() adds new directories at the end of
- * sys.path when they really need to be added in order at
- * the start. We therefore need to do a fiddle and shift
- * any newly added directories to the start of sys.path.
- */
-
- if (!wsgi_daemon_pool)
- wsgi_python_path = wsgi_server_config->python_path;
-
- if (wsgi_python_path) {
- PyObject *path = NULL;
-
- module = PyImport_ImportModule("site");
- path = PySys_GetObject("path");
-
- if (module && path) {
- PyObject *dict = NULL;
-
- PyObject *old = NULL;
- PyObject *new = NULL;
- PyObject *tmp = NULL;
-
- PyObject *item = NULL;
-
- int i = 0;
-
- old = PyList_New(0);
- new = PyList_New(0);
- tmp = PyList_New(0);
-
- for (i=0; i<PyList_Size(path); i++)
- PyList_Append(old, PyList_GetItem(path, i));
-
- dict = PyModule_GetDict(module);
- object = PyDict_GetItemString(dict, "addsitedir");
-
- if (object) {
- const char *start;
- const char *end;
- const char *value;
-
- PyObject *item;
- PyObject *args;
-
- PyObject *result = NULL;
-
- Py_INCREF(object);
-
- start = wsgi_python_path;
- end = strchr(start, DELIM);
-
- if (end) {
-#if PY_MAJOR_VERSION >= 3
- item = PyUnicode_Decode(start, end-start,
- Py_FileSystemDefaultEncoding,
- "surrogateescape");
-#else
- item = PyString_FromStringAndSize(start, end-start);
-#endif
- start = end+1;
-
- value = PyString_AsString(item);
-
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Adding '%s' to "
- "path.", getpid(), value);
- Py_END_ALLOW_THREADS
-
- args = Py_BuildValue("(O)", item);
- result = PyEval_CallObject(object, args);
-
- if (!result) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Call to "
- "'site.addsitedir()' failed for '%s', "
- "stopping.", getpid(), value);
- Py_END_ALLOW_THREADS
- }
-
- Py_XDECREF(result);
- Py_DECREF(item);
- Py_DECREF(args);
-
- end = strchr(start, DELIM);
-
- while (result && end) {
-#if PY_MAJOR_VERSION >= 3
- item = PyUnicode_Decode(start, end-start,
- Py_FileSystemDefaultEncoding,
- "surrogateescape");
-#else
- item = PyString_FromStringAndSize(start, end-start);
-#endif
- start = end+1;
-
- value = PyString_AsString(item);
-
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Adding '%s' to "
- "path.", getpid(), value);
- Py_END_ALLOW_THREADS
-
- args = Py_BuildValue("(O)", item);
- result = PyEval_CallObject(object, args);
-
- if (!result) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0),
- wsgi_server, "mod_wsgi (pid=%d): "
- "Call to 'site.addsitedir()' failed "
- "for '%s', stopping.",
- getpid(), value);
- Py_END_ALLOW_THREADS
- }
-
- Py_XDECREF(result);
- Py_DECREF(item);
- Py_DECREF(args);
-
- end = strchr(start, DELIM);
- }
- }
-
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Adding '%s' to "
- "path.", getpid(), start);
- Py_END_ALLOW_THREADS
-
- args = Py_BuildValue("(s)", start);
- result = PyEval_CallObject(object, args);
-
- if (!result) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Call to "
- "'site.addsitedir()' failed for '%s'.",
- getpid(), start);
- Py_END_ALLOW_THREADS
- }
-
- Py_XDECREF(result);
- Py_DECREF(args);
-
- Py_DECREF(object);
- }
- else {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Unable to locate "
- "'site.addsitedir()'.", getpid());
- Py_END_ALLOW_THREADS
- }
-
- for (i=0; i<PyList_Size(path); i++)
- PyList_Append(tmp, PyList_GetItem(path, i));
-
- for (i=0; i<PyList_Size(tmp); i++) {
- item = PyList_GetItem(tmp, i);
- if (!PySequence_Contains(old, item)) {
- int index = PySequence_Index(path, item);
- PyList_Append(new, item);
- if (index != -1)
- PySequence_DelItem(path, index);
- }
- }
-
- PyList_SetSlice(path, 0, 0, new);
-
- Py_DECREF(old);
- Py_DECREF(new);
- Py_DECREF(tmp);
- }
- else {
- if (!module) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Unable to import 'site' "
- "module.", getpid());
- Py_END_ALLOW_THREADS
- }
-
- if (!path) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Lookup for 'sys.path' "
- "failed.", getpid());
- Py_END_ALLOW_THREADS
- }
- }
-
- Py_XDECREF(module);
- }
-
- /*
- * Create 'mod_wsgi' Python module. We first try and import an
- * external Python module of the same name. The intent is
- * that this external module would provide optional features
- * implementable using pure Python code. Don't want to
- * include them in the main Apache mod_wsgi package as that
- * complicates that package and also wouldn't allow them to
- * be released to a separate schedule. It is easier for
- * people to replace Python modules package with a new
- * version than it is to replace Apache module package.
- */
-
- module = PyImport_ImportModule("mod_wsgi");
-
- if (!module) {
- PyObject *modules = NULL;
-
- modules = PyImport_GetModuleDict();
- module = PyDict_GetItemString(modules, "mod_wsgi");
-
- if (module) {
- PyErr_Print();
-
- PyDict_DelItemString(modules, "mod_wsgi");
- }
-
- PyErr_Clear();
-
- module = PyImport_AddModule("mod_wsgi");
-
- Py_INCREF(module);
- }
- else if (!*name) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Imported 'mod_wsgi'.",
- getpid());
- Py_END_ALLOW_THREADS
- }
-
- /*
- * Add Apache module version information to the Python
- * 'mod_wsgi' module.
- */
-
- PyModule_AddObject(module, "version", Py_BuildValue("(ii)",
- MOD_WSGI_MAJORVERSION_NUMBER,
- MOD_WSGI_MINORVERSION_NUMBER));
-
- /*
- * Add information about process group and application
- * group to the Python 'mod_wsgi' module.
- */
-
-#if PY_MAJOR_VERSION >= 3
- PyModule_AddObject(module, "process_group",
- PyUnicode_DecodeLatin1(wsgi_daemon_group,
- strlen(wsgi_daemon_group), NULL));
- PyModule_AddObject(module, "application_group",
- PyUnicode_DecodeLatin1(name, strlen(name), NULL));
-#else
- PyModule_AddObject(module, "process_group",
- PyString_FromString(wsgi_daemon_group));
- PyModule_AddObject(module, "application_group",
- PyString_FromString(name));
-#endif
-
- Py_DECREF(module);
-
- /*
- * Create 'apache' Python module. If this is not a daemon
- * process and it is the first interpreter created by
- * Python, we first try and import an external Python module
- * of the same name. The intent is that this external module
- * would provide the SWIG bindings for the internal Apache
- * APIs. Only support use of such bindings in the first
- * interpreter created due to threading issues in SWIG
- * generated.
- */
-
- module = NULL;
-
- if (!wsgi_daemon_pool) {
- module = PyImport_ImportModule("apache");
-
- if (!module) {
- PyObject *modules = NULL;
-
- modules = PyImport_GetModuleDict();
- module = PyDict_GetItemString(modules, "apache");
-
- if (module) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Unable to import "
- "'apache' extension module.", getpid());
- Py_END_ALLOW_THREADS
-
- PyErr_Print();
-
- PyDict_DelItemString(modules, "apache");
-
- module = NULL;
- }
-
- PyErr_Clear();
- }
- else {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Imported 'apache'.",
- getpid());
- Py_END_ALLOW_THREADS
- }
- }
-
- if (!module) {
- module = PyImport_AddModule("apache");
-
- Py_INCREF(module);
- }
-
- /*
- * Add Apache version information to the Python 'apache'
- * module.
- */
-
- PyModule_AddObject(module, "version", Py_BuildValue("(ii)",
- AP_SERVER_MAJORVERSION_NUMBER,
- AP_SERVER_MINORVERSION_NUMBER));
-
- Py_DECREF(module);
-
- /*
- * Restore previous thread state. Only need to do
- * this where had to create a new interpreter. This
- * is basically anything except the first Python
- * interpreter instance. We need to restore it in
- * these cases as came into the function holding the
- * simplified GIL state for this thread but creating
- * the interpreter has resulted in a new thread
- * state object being created bound to the newly
- * created interpreter. In doing this though we want
- * to cache the thread state object which has been
- * created when interpreter is created. This is so
- * it can be reused later ensuring that thread local
- * data persists between requests.
- */
-
- if (self->owner) {
-#if APR_HAS_THREADS
- int thread_id = 0;
- int *thread_handle = NULL;
-
- self->tstate_table = apr_hash_make(wsgi_server->process->pool);
-
- apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key);
-
- if (!thread_handle) {
- thread_id = wsgi_thread_count++;
- thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool,
- &thread_id, sizeof(thread_id));
- apr_threadkey_private_set(thread_handle, wsgi_thread_key);
- }
- else {
- thread_id = *thread_handle;
- }
-
- if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
- "mod_wsgi (pid=%d): Bind thread state for "
- "thread %d against interpreter '%s'.", getpid(),
- thread_id, self->name);
- }
-
- apr_hash_set(self->tstate_table, thread_handle,
- sizeof(*thread_handle), tstate);
-
- PyThreadState_Swap(save_tstate);
-#else
- self->tstate = tstate;
- PyThreadState_Swap(save_tstate);
-#endif
- }
-
- return self;
-}
-
-static void Interpreter_dealloc(InterpreterObject *self)
-{
- PyThreadState *tstate = NULL;
- PyObject *exitfunc = NULL;
- PyObject *module = NULL;
-
- PyThreadState *tstate_enter = NULL;
-
- /*
- * We should always enter here with the Python GIL
- * held and an active thread state. This should only
- * now occur when shutting down interpreter and not
- * when releasing interpreter as don't support
- * recyling of interpreters within the process. Thus
- * the thread state should be that for the main
- * Python interpreter. Where dealing with a named
- * sub interpreter, we need to change the thread
- * state to that which was originally used to create
- * that sub interpreter before doing anything.
- */
-
- tstate_enter = PyThreadState_Get();
-
- if (*self->name) {
-#if APR_HAS_THREADS
- int thread_id = 0;
- int *thread_handle = NULL;
-
- apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key);
-
- if (!thread_handle) {
- thread_id = wsgi_thread_count++;
- thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool,
- &thread_id, sizeof(thread_id));
- apr_threadkey_private_set(thread_handle, wsgi_thread_key);
- }
- else {
- thread_id = *thread_handle;
- }
-
- tstate = apr_hash_get(self->tstate_table, &thread_id,
- sizeof(thread_id));
-
- if (!tstate) {
- tstate = PyThreadState_New(self->interp);
-
- if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
- "mod_wsgi (pid=%d): Create thread state for "
- "thread %d against interpreter '%s'.", getpid(),
- thread_id, self->name);
- }
-
- apr_hash_set(self->tstate_table, thread_handle,
- sizeof(*thread_handle), tstate);
- }
-#else
- tstate = self->tstate;
-#endif
-
- /*
- * Swap to interpreter thread state that was used when
- * the sub interpreter was created.
- */
-
- PyThreadState_Swap(tstate);
- }
-
- if (self->owner) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Destroy interpreter '%s'.",
- getpid(), self->name);
- Py_END_ALLOW_THREADS
- }
- else {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Cleanup interpreter '%s'.",
- getpid(), self->name);
- Py_END_ALLOW_THREADS
- }
-
- /*
- * Because the thread state we are using was created outside
- * of any Python code and is not the same as the Python main
- * thread, there is no record of it within the 'threading'
- * module. We thus need to access current thread function of
- * the 'threading' module to force it to create a thread
- * handle for the thread. If we do not do this, then the
- * 'threading' modules exit function will always fail
- * because it will not be able to find a handle for this
- * thread.
- */
-
- module = PyImport_ImportModule("threading");
-
- if (!module)
- PyErr_Clear();
-
- if (module) {
- PyObject *dict = NULL;
- PyObject *func = NULL;
-
- dict = PyModule_GetDict(module);
-#if PY_MAJOR_VERSION >= 3
- func = PyDict_GetItemString(dict, "current_thread");
-#else
- func = PyDict_GetItemString(dict, "currentThread");
-#endif
- if (func) {
- PyObject *res = NULL;
- Py_INCREF(func);
- res = PyEval_CallObject(func, (PyObject *)NULL);
- if (!res) {
- PyErr_Clear();
- }
- Py_XDECREF(res);
- Py_DECREF(func);
- }
- }
-
- /*
- * In Python 2.5.1 an exit function is no longer used to
- * shutdown and wait on non daemon threads which were created
- * from Python code. Instead, in Py_Main() it explicitly
- * calls 'threading._shutdown()'. Thus need to emulate this
- * behaviour for those versions.
- */
-
- if (module) {
- PyObject *dict = NULL;
- PyObject *func = NULL;
-
- dict = PyModule_GetDict(module);
- func = PyDict_GetItemString(dict, "_shutdown");
- if (func) {
- PyObject *res = NULL;
- Py_INCREF(func);
- res = PyEval_CallObject(func, (PyObject *)NULL);
-
- if (res == NULL) {
- PyObject *m = NULL;
- PyObject *result = NULL;
-
- PyObject *type = NULL;
- PyObject *value = NULL;
- PyObject *traceback = NULL;
-
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Exception occurred within "
- "threading._shutdown().", getpid());
- Py_END_ALLOW_THREADS
-
- PyErr_Fetch(&type, &value, &traceback);
- PyErr_NormalizeException(&type, &value, &traceback);
-
- if (!value) {
- value = Py_None;
- Py_INCREF(value);
- }
-
- if (!traceback) {
- traceback = Py_None;
- Py_INCREF(traceback);
- }
-
- m = PyImport_ImportModule("traceback");
-
- if (m) {
- PyObject *d = NULL;
- PyObject *o = NULL;
- d = PyModule_GetDict(m);
- o = PyDict_GetItemString(d, "print_exception");
- if (o) {
- PyObject *log = NULL;
- PyObject *args = NULL;
- Py_INCREF(o);
- log = newLogObject(NULL, APLOG_ERR, NULL);
- args = Py_BuildValue("(OOOOO)", type, value,
- traceback, Py_None, log);
- result = PyEval_CallObject(o, args);
- Py_DECREF(args);
- Py_DECREF(log);
- Py_DECREF(o);
- }
- }
-
- if (!result) {
- /*
- * If can't output exception and traceback then
- * use PyErr_Print to dump out details of the
- * exception. For SystemExit though if we do
- * that the process will actually be terminated
- * so can only clear the exception information
- * and keep going.
- */
-
- PyErr_Restore(type, value, traceback);
-
- if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
- PyErr_Print();
- PyErr_Clear();
- }
- else {
- PyErr_Clear();
- }
- }
- else {
- Py_XDECREF(type);
- Py_XDECREF(value);
- Py_XDECREF(traceback);
- }
-
- Py_XDECREF(result);
-
- Py_XDECREF(m);
- }
-
- Py_XDECREF(res);
- Py_DECREF(func);
- }
- }
-
- /* Finally done with 'threading' module. */
-
- Py_XDECREF(module);
-
- /*
- * Invoke exit functions by calling sys.exitfunc() for
- * Python 2.X and atexit._run_exitfuncs() for Python 3.X.
- * Note that in Python 3.X we can't call this on main Python
- * interpreter as for Python 3.X it doesn't deregister
- * functions as called, so have no choice but to rely on
- * Py_Finalize() to do it for the main interpreter. Now
- * that simplified GIL state API usage sorted out, this
- * should be okay.
- */
-
- module = NULL;
-
-#if PY_MAJOR_VERSION >= 3
- if (self->owner) {
- module = PyImport_ImportModule("atexit");
-
- if (module) {
- PyObject *dict = NULL;
-
- dict = PyModule_GetDict(module);
- exitfunc = PyDict_GetItemString(dict, "_run_exitfuncs");
- }
- else
- PyErr_Clear();
- }
-#else
- exitfunc = PySys_GetObject("exitfunc");
-#endif
-
- if (exitfunc) {
- PyObject *res = NULL;
- Py_INCREF(exitfunc);
- PySys_SetObject("exitfunc", (PyObject *)NULL);
- res = PyEval_CallObject(exitfunc, (PyObject *)NULL);
-
- if (res == NULL) {
- PyObject *m = NULL;
- PyObject *result = NULL;
-
- PyObject *type = NULL;
- PyObject *value = NULL;
- PyObject *traceback = NULL;
-
- if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): SystemExit exception "
- "raised by exit functions ignored.", getpid());
- Py_END_ALLOW_THREADS
- }
- else {
- Py_BEGIN_ALLOW_THREADS
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
- "mod_wsgi (pid=%d): Exception occurred within "
- "exit functions.", getpid());
- Py_END_ALLOW_THREADS
- }
-
- PyErr_Fetch(&type, &value, &traceback);
- PyErr_NormalizeException(&type, &value, &traceback);
-
- if (!value) {
- value = Py_None;
- Py_INCREF(value);
- }
-
- if (!traceback) {
- traceback = Py_None;
- Py_INCREF(traceback);
- }
-
- m = PyImport_ImportModule("traceback");
-
- if (m) {
- PyObject *d = NULL;
- PyObject *o = NULL;
- d = PyModule_GetDict(m);
- o = PyDict_GetItemString(d, "print_exception");
- if (o) {
- PyObject *log = NULL;
- PyObject *args = NULL;
- Py_INCREF(o);
- log = newLogObject(NULL, APLOG_ERR, NULL);
- args = Py_BuildValue("(OOOOO)", type, value,
- traceback, Py_None, log);
- result = PyEval_CallObject(o, args);
- Py_DECREF(args);
- Py_DECREF(log);
- Py_DECREF(o);
- }
- }
-
- if (!result) {
- /*
- * If can't output exception and traceback then
- * use PyErr_Print to dump out details of the
- * exception. For SystemExit though if we do
- * that the process will actually be terminated
- * so can only clear the exception information
- * and keep going.
- */
-
- PyErr_Restore(type, value, traceback);
-
- if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
- PyErr_Print();
- PyErr_Clear();
- }
- else {
- PyErr_Clear();
- }
- }
- else {
- Py_XDECREF(type);
- Py_XDECREF(value);
- Py_XDECREF(traceback);
- }
-
- Py_XDECREF(result);
-
- Py_XDECREF(m);
- }
-
- Py_XDECREF(res);
- Py_DECREF(exitfunc);
- }
-
- Py_XDECREF(module);
-
- /* If we own it, we destroy it. */
-
- if (self->owner) {
- /*
- * We need to destroy all the thread state objects
- * associated with the interpreter. If there are
- * background threads that were created then this
- * may well cause them to crash the next time they
- * try to run. Only saving grace is that we are
- * trying to shutdown the process.
- */
-
- PyThreadState *tstate_save = tstate;
- PyThreadState *tstate_next = NULL;
-
- PyThreadState_Swap(NULL);
-
- tstate = tstate->interp->tstate_head;
- while (tstate) {
- tstate_next = tstate->next;
- if (tstate != tstate_save) {
- PyThreadState_Swap(tstate);
- PyThreadState_Clear(tstate);
- PyThreadState_Swap(NULL);
- PyThreadState_Delete(tstate);
- }
- tstate = tstate_next;
- }
-
- tstate = tstate_save;
-
- PyThreadState_Swap(tstate);
-
- /* Can now destroy the interpreter. */
-
- Py_EndInterpreter(tstate);
-
- PyThreadState_Swap(tstate_enter);
- }
-
- free(self->name);
-
- PyObject_Del(self);
-}
-
-static PyTypeObject Interpreter_Type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "mod_wsgi.Interpreter", /*tp_name*/
- sizeof(InterpreterObject), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- /* methods */
- (destructor)Interpreter_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash*/
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT, /*tp_flags*/
- 0, /*tp_doc*/
- 0, /*tp_traverse*/
- 0, /*tp_clear*/
- 0, /*tp_richcompare*/
- 0, /*tp_weaklistoffset*/
- 0, /*tp_iter*/
- 0, /*tp_iternext*/
- 0, /*tp_methods*/
- 0, /*tp_members*/
- 0, /*tp_getset*/
- 0, /*tp_base*/
- 0, /*tp_dict*/
- 0, /*tp_descr_get*/
- 0, /*tp_descr_set*/
- 0, /*tp_dictoffset*/
- 0, /*tp_init*/
- 0, /*tp_alloc*/
- 0, /*tp_new*/
- 0, /*tp_free*/
- 0, /*tp_is_gc*/
-};
-
-/*
- * Startup and shutdown of Python interpreter. In mod_wsgi if
- * the Python interpreter hasn't been initialised by another
- * Apache module such as mod_python, we will take control and
- * initialise it. Need to remember that we initialised Python
- * and whether done in parent or child process as when done in
- * the parent we also take responsibility for performing special
- * Python fixups after Apache is forked and child process has
- * run.
- *
- * Note that by default we now defer initialisation of Python
- * until after the fork of processes as Python 3.X by design
- * doesn't clean up properly when it is destroyed causing
- * significant memory leaks into Apache parent process on an
- * Apache restart. Some Python 2.X versions also have real
- * memory leaks but not near as much. The result of deferring
- * initialisation is that can't benefit from copy on write
- * semantics for loaded data across a fork. Each process will
- * therefore have higher memory requirement where Python needs
- * to be used.
- */
-
-static int wsgi_python_initialized = 0;
-
-#if defined(MOD_WSGI_DISABLE_EMBEDDED)
-static int wsgi_python_required = 0;
-#else
-static int wsgi_python_required = -1;
-#endif
-
-static int wsgi_python_after_fork = 1;
-
-static void wsgi_python_version(void)
-{
- const char *compile = PY_VERSION;
- const char *dynamic = 0;
-
- dynamic = strtok((char *)Py_GetVersion(), " ");
-
- if (strcmp(compile, dynamic) != 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server,
- "mod_wsgi: Compiled for Python/%s.", compile);
- ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server,
- "mod_wsgi: Runtime using Python/%s.", dynamic);
- }
-}
-
-static apr_status_t wsgi_python_term()
-{
- PyInterpreterState *interp = NULL;
- PyThreadState *tstate = NULL;
-
- PyObject *module = NULL;
-
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Terminating Python.", getpid());
-
- /*
- * We should be executing in the main thread again at this
- * point but without the GIL, so simply restore the original
- * thread state for that thread that we remembered when we
- * initialised the interpreter.
- */
-
- PyEval_AcquireThread(wsgi_main_tstate);
-
- /*
- * Work around bug in Python 3.X whereby it will crash if
- * atexit imported into sub interpreter, but never imported
- * into main interpreter before calling Py_Finalize(). We
- * perform an import of atexit module and it as side effect
- * must be performing required initialisation.
- */
-
- module = PyImport_ImportModule("atexit");
- Py_XDECREF(module);
-
- /*
- * In Python 2.6.5 and Python 3.1.2 the shutdown of
- * threading was moved back into Py_Finalize() for the main
- * Python interpreter. Because we shutting down threading
- * ourselves, the second call results in errors being logged
- * when Py_Finalize() is called and the shutdown function
- * called a second time. The errors don't indicate any real
- * problem and the threading module ignores them anyway.
- * Whether we are using Python with this changed behaviour
- * can only be checked by looking at run time version.
- * Rather than try and add a dynamic check, create a fake
- * 'dummy_threading' module as the presence of that shuts up
- * the messages. It doesn't matter that the rest of the
- * shutdown function still runs as everything is already
- * stopped so doesn't do anything.
- */
-
- if (!PyImport_AddModule("dummy_threading"))
- PyErr_Clear();
-
- Py_Finalize();
-
- wsgi_python_initialized = 0;
-
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Python has shutdown.", getpid());
-
- return APR_SUCCESS;
-}
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
-static void wsgi_python_parent_cleanup(void *data)
-#else
-static apr_status_t wsgi_python_parent_cleanup(void *data)
-#endif
-{
- if (wsgi_parent_pid == getpid()) {
- /*
- * Destroy Python itself including the main
- * interpreter. If mod_python is being loaded it
- * is left to mod_python to destroy Python,
- * although it currently doesn't do so.
- */
-
- if (wsgi_python_initialized)
- wsgi_python_term();
- }
-
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
- return APR_SUCCESS;
-#endif
-}
-
-
-static void wsgi_python_init(apr_pool_t *p)
-{
- const char *python_home = 0;
-
-#if defined(DARWIN) && (AP_SERVER_MAJORVERSION_NUMBER < 2)
- static int initialized = 0;
-#else
- static int initialized = 1;
-#endif
-
- /* Perform initialisation if required. */
-
- if (!Py_IsInitialized() || !initialized) {
-
- /* Enable Python 3.0 migration warnings. */
-
-#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6
- if (wsgi_server_config->py3k_warning_flag == 1)
- Py_Py3kWarningFlag++;
-#endif
-
- /* Disable writing of byte code files. */
-
-#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6
- if (wsgi_server_config->dont_write_bytecode == 1)
- Py_DontWriteBytecodeFlag++;
-#endif
-
- /* Check for Python paths and optimisation flag. */
-
- if (wsgi_server_config->python_optimize > 0)
- Py_OptimizeFlag = wsgi_server_config->python_optimize;
- else
- Py_OptimizeFlag = 0;
-
- /* Check for control options for Python warnings. */
-
- if (wsgi_server_config->python_warnings) {
- apr_array_header_t *options = NULL;
- char **entries;
-
- int i;
-
- options = wsgi_server_config->python_warnings;
- entries = (char **)options->elts;
-
- for (i = 0; i < options->nelts; ++i) {
-#if PY_MAJOR_VERSION >= 3
- wchar_t *s = NULL;
- int len = strlen(entries[i])+1;
-
- s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t));
-
-#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
- wsgi_utf8_to_unicode_path(s, len, entries[i]);
-#else
- mbstowcs(s, entries[i], len);
-#endif
- PySys_AddWarnOption(s);
-#else
- PySys_AddWarnOption(entries[i]);
-#endif
- }
- }
-
- /* Check for Python HOME being overridden. */
-
- python_home = wsgi_server_config->python_home;
-
-#if defined(MOD_WSGI_WITH_DAEMONS)
- if (wsgi_daemon_process && wsgi_daemon_process->group->python_home)
- python_home = wsgi_daemon_process->group->python_home;
-#endif
-
-#if PY_MAJOR_VERSION >= 3
- if (python_home) {
- wchar_t *s = NULL;
- int len = strlen(python_home)+1;
-
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Python home %s.", getpid(),
- python_home);
-
- s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t));
-
-#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
- wsgi_utf8_to_unicode_path(s, len, python_home);
-#else
- mbstowcs(s, python_home, len);
-#endif
- Py_SetPythonHome(s);
- }
-#else
- if (python_home) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Python home %s.", getpid(),
- python_home);
-
- Py_SetPythonHome((char *)python_home);
- }
-#endif
-
- /*
- * Work around bug in Python 3.1 where it will crash
- * when used in non console application on Windows if
- * stdin/stdout have been initialised and aren't null.
- */
-
-#if defined(WIN32) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 3
- _wputenv(L"PYTHONIOENCODING=cp1252:backslashreplace");
-#endif
-
- /* Initialise Python. */
-
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Initializing Python.", getpid());
-
- initialized = 1;
-
- Py_Initialize();
-
- /* Initialise threading. */
-
- PyEval_InitThreads();
-
- /*
- * We now want to release the GIL. Before we do that
- * though we remember what the current thread state is.
- * We will use that later to restore the main thread
- * state when we want to cleanup interpreters on
- * shutdown.
- */
-
- wsgi_main_tstate = PyThreadState_Get();
- PyEval_ReleaseThread(wsgi_main_tstate);
-
- wsgi_python_initialized = 1;
-
- /*
- * Register cleanups to be performed on parent restart
- * or shutdown. This will destroy Python itself.
- */
-
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- ap_register_cleanup(p, NULL, wsgi_python_parent_cleanup,
- ap_null_cleanup);
-#else
- apr_pool_cleanup_register(p, NULL, wsgi_python_parent_cleanup,
- apr_pool_cleanup_null);
-#endif
- }
-}
-
-/*
- * Functions for acquiring and subsequently releasing desired
- * Python interpreter instance. When acquiring the interpreter
- * a new interpreter instance will be created on demand if it
- * is required. The Python GIL will be held on return when the
- * interpreter is acquired.
- */
-
-#if APR_HAS_THREADS
-static apr_thread_mutex_t* wsgi_interp_lock = NULL;
-static apr_thread_mutex_t* wsgi_module_lock = NULL;
-#endif
-
-static PyObject *wsgi_interpreters = NULL;
-
-static InterpreterObject *wsgi_acquire_interpreter(const char *name)
-{
- PyThreadState *tstate = NULL;
- PyInterpreterState *interp = NULL;
- InterpreterObject *handle = NULL;
-
- PyGILState_STATE state;
-
- /*
- * In a multithreaded MPM must protect the
- * interpreters table. This lock is only needed to
- * avoid a secondary thread coming in and creating
- * the same interpreter if Python releases the GIL
- * when an interpreter is being created.
- */
-
-#if APR_HAS_THREADS
- apr_thread_mutex_lock(wsgi_interp_lock);
-#endif
-
- /*
- * This function should never be called when the
- * Python GIL is held, so need to acquire it. Even
- * though we may need to work with a sub
- * interpreter, we need to acquire GIL against main
- * interpreter first to work with interpreter
- * dictionary.
- */
-
- state = PyGILState_Ensure();
-
- /*
- * Check if already have interpreter instance and
- * if not need to create one.
- */
-
- handle = (InterpreterObject *)PyDict_GetItemString(wsgi_interpreters,
- name);
-
- if (!handle) {
- handle = newInterpreterObject(name);
-
- if (!handle) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server,
- "mod_wsgi (pid=%d): Cannot create interpreter '%s'.",
- getpid(), name);
-
- PyErr_Print();
- PyErr_Clear();
-
- PyGILState_Release(state);
-
-#if APR_HAS_THREADS
- apr_thread_mutex_unlock(wsgi_interp_lock);
-#endif
- return NULL;
- }
-
- PyDict_SetItemString(wsgi_interpreters, name, (PyObject *)handle);
- }
- else
- Py_INCREF(handle);
-
- interp = handle->interp;
-
- /*
- * Create new thread state object. We should only be
- * getting called where no current active thread
- * state, so no need to remember the old one. When
- * working with the main Python interpreter always
- * use the simplified API for GIL locking so any
- * extension modules which use that will still work.
- */
-
- PyGILState_Release(state);
-
-#if APR_HAS_THREADS
- apr_thread_mutex_unlock(wsgi_interp_lock);
-#endif
-
- if (*name) {
-#if APR_HAS_THREADS
- int thread_id = 0;
- int *thread_handle = NULL;
-
- apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key);
-
- if (!thread_handle) {
- thread_id = wsgi_thread_count++;
- thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool,
- &thread_id, sizeof(thread_id));
- apr_threadkey_private_set(thread_handle, wsgi_thread_key);
- }
- else {
- thread_id = *thread_handle;
- }
-
- tstate = apr_hash_get(handle->tstate_table, &thread_id,
- sizeof(thread_id));
-
- if (!tstate) {
- tstate = PyThreadState_New(interp);
-
- if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
- "mod_wsgi (pid=%d): Create thread state for "
- "thread %d against interpreter '%s'.", getpid(),
- thread_id, handle->name);
- }
-
- apr_hash_set(handle->tstate_table, thread_handle,
- sizeof(*thread_handle), tstate);
- }
-#else
- tstate = handle->tstate;
-#endif
-
- PyEval_AcquireThread(tstate);
- }
- else {
- PyGILState_Ensure();
-
- /*
- * When simplified GIL state API is used, the thread
- * local data only persists for the extent of the top
- * level matching ensure/release calls. We want to
- * extend lifetime of the thread local data beyond
- * that, retaining it for all requests within the one
- * thread for the life of the process. To do that we
- * need to artificially increment the reference count
- * for the associated thread state object.
- */
-
- tstate = PyThreadState_Get();
- if (tstate && tstate->gilstate_counter == 1)
- tstate->gilstate_counter++;
- }
-
- return handle;
-}
-
-static void wsgi_release_interpreter(InterpreterObject *handle)
-{
- PyThreadState *tstate = NULL;
-
- PyGILState_STATE state;
-
- /*
- * Need to release and destroy the thread state that
- * was created against the interpreter. This will
- * release the GIL. Note that it should be safe to
- * always assume that the simplified GIL state API
- * lock was originally unlocked as always calling in
- * from an Apache thread when we acquire the
- * interpreter in the first place.
- */
-
- if (*handle->name) {
- tstate = PyThreadState_Get();
- PyEval_ReleaseThread(tstate);
- }
- else
- PyGILState_Release(PyGILState_UNLOCKED);
-
- /*
- * Need to reacquire the Python GIL just so we can
- * decrement our reference count to the interpreter
- * itself. If the interpreter has since been removed
- * from the table of interpreters this will result
- * in its destruction if its the last reference.
- */
-
- state = PyGILState_Ensure();
-
- Py_DECREF(handle);
-
- PyGILState_Release(state);
-}
-
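
Note: the removed wsgi_acquire_interpreter()/wsgi_release_interpreter() pair above is built around the simplified GIL state API whenever the main interpreter is the target. A minimal sketch of that underlying pattern for any C callback entered from a thread that Python did not create; the helper name and callable are illustrative only:

    #include <Python.h>

    static void call_into_python(PyObject *callable)
    {
        /* Safe even if this thread has never held the GIL before. */
        PyGILState_STATE state = PyGILState_Ensure();

        PyObject *result = PyObject_CallObject(callable, NULL);

        if (!result)
            PyErr_Print();

        Py_XDECREF(result);

        /* Restores whatever GIL/thread state existed before Ensure(). */
        PyGILState_Release(state);
    }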
/*
* Code for importing a module from source by absolute path.
*/
@@ -6290,6 +2836,8 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
PyObject *co = NULL;
struct _node *n = NULL;
+ PyObject *transaction = NULL;
+
#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
apr_wchar_t wfilename[APR_PATH_MAX];
#endif
@@ -6297,13 +2845,13 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
if (exists) {
Py_BEGIN_ALLOW_THREADS
if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Reloading WSGI script '%s'.", getpid(),
process_group, application_group, filename);
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Reloading WSGI script '%s'.", getpid(),
process_group, application_group, filename);
@@ -6313,13 +2861,13 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
else {
Py_BEGIN_ALLOW_THREADS
if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Loading WSGI script '%s'.", getpid(),
process_group, application_group, filename);
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Loading WSGI script '%s'.", getpid(),
process_group, application_group, filename);
@@ -6333,14 +2881,14 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
Py_BEGIN_ALLOW_THREADS
if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d, process='%s', "
"application='%s'): Failed to convert '%s' "
"to UCS2 filename.", getpid(),
process_group, application_group, filename);
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d, process='%s', "
"application='%s'): Failed to convert '%s' "
"to UCS2 filename.", getpid(),
@@ -6350,7 +2898,7 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
return NULL;
}
- fp = _wfopen(wfilename, "r");
+ fp = _wfopen(wfilename, L"r");
#else
fp = fopen(filename, "r");
#endif
@@ -6358,13 +2906,13 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
if (!fp) {
Py_BEGIN_ALLOW_THREADS
if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Call to fopen() failed for '%s'.", getpid(),
process_group, application_group, filename);
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Call to fopen() failed for '%s'.", getpid(),
process_group, application_group, filename);
@@ -6380,13 +2928,13 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
if (!n) {
Py_BEGIN_ALLOW_THREADS
if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Failed to parse WSGI script file '%s'.", getpid(),
process_group, application_group, filename);
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d, process='%s', application='%s'): "
"Failed to parse WSGI script file '%s'.", getpid(),
process_group, application_group, filename);
@@ -6395,6 +2943,85 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
return NULL;
}
+ if (wsgi_newrelic_config_file) {
+ PyObject *module = NULL;
+
+ PyObject *application = NULL;
+
+
+ module = PyImport_ImportModule("newrelic.api.application");
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *object = NULL;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "application");
+
+ Py_INCREF(object);
+ application = PyObject_CallFunctionObjArgs(object, NULL);
+ Py_DECREF(object);
+
+ Py_DECREF(module);
+ module = NULL;
+
+ if (!application)
+ PyErr_Clear();
+ }
+ else
+ PyErr_Clear();
+
+ if (application)
+ module = PyImport_ImportModule("newrelic.api.background_task");
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *object = NULL;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "BackgroundTask");
+
+ if (object) {
+ PyObject *args = NULL;
+
+ Py_INCREF(object);
+
+ args = Py_BuildValue("(Oss)", application, filename,
+ "Script/Import");
+ transaction = PyObject_Call(object, args, NULL);
+
+ if (!transaction)
+ PyErr_WriteUnraisable(object);
+
+ Py_DECREF(args);
+ Py_DECREF(object);
+
+ if (transaction) {
+ PyObject *result = NULL;
+
+ object = PyObject_GetAttrString(
+ transaction, "__enter__");
+ args = PyTuple_Pack(0);
+ result = PyObject_Call(object, args, NULL);
+
+ if (!result)
+ PyErr_WriteUnraisable(object);
+
+ Py_XDECREF(result);
+ Py_DECREF(object);
+ }
+ }
+
+ Py_DECREF(module);
+ }
+ else
+ PyErr_Print();
+
+ Py_XDECREF(application);
+ }
+ else
+ PyErr_Clear();
+
co = (PyObject *)PyNode_Compile(n, filename);
PyNode_Free(n);
@@ -6403,19 +3030,71 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
Py_XDECREF(co);
- if (m) {
- PyObject *object = NULL;
+ if (wsgi_newrelic_config_file) {
+ if (transaction) {
+ PyObject *object;
- if (!r || strcmp(r->filename, filename)) {
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- struct stat finfo;
- if (stat(filename, &finfo) == -1) {
- object = PyLong_FromLongLong(0);
+ object = PyObject_GetAttrString(transaction, "__exit__");
+
+ if (m) {
+ PyObject *args = NULL;
+ PyObject *result = NULL;
+
+ args = PyTuple_Pack(3, Py_None, Py_None, Py_None);
+ result = PyObject_Call(object, args, NULL);
+
+ if (!result)
+ PyErr_WriteUnraisable(object);
+ else
+ Py_DECREF(result);
+
+ Py_DECREF(args);
}
else {
- object = PyLong_FromLongLong(finfo.st_mtime);
+ PyObject *args = NULL;
+ PyObject *result = NULL;
+
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ PyErr_Fetch(&type, &value, &traceback);
+
+ if (!value) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+
+ if (!traceback) {
+ traceback = Py_None;
+ Py_INCREF(traceback);
+ }
+
+ PyErr_NormalizeException(&type, &value, &traceback);
+
+ args = PyTuple_Pack(3, type, value, traceback);
+ result = PyObject_Call(object, args, NULL);
+
+ if (!result)
+ PyErr_WriteUnraisable(object);
+ else
+ Py_DECREF(result);
+
+ Py_DECREF(args);
+
+ PyErr_Restore(type, value, traceback);
}
-#else
+
+ Py_DECREF(object);
+
+ Py_DECREF(transaction);
+ }
+ }
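The block added above only runs when wsgi_newrelic_config_file has been set: it wraps the import of the WSGI script file in a New Relic background-task transaction, so time spent loading the script is reported to the agent. A rough Python equivalent of what the C code does (a sketch only; `module` and `filename` stand in for values the C code already holds, and error handling is simplified):

    import newrelic.api.application
    import newrelic.api.background_task

    # Look up the agent's application object, then run the script import
    # inside a background-task transaction named after the script file,
    # grouped under 'Script/Import' as in the C code above.
    application = newrelic.api.application.application()

    with newrelic.api.background_task.BackgroundTask(
            application, filename, 'Script/Import'):
        code = compile(open(filename).read(), filename, 'exec')
        exec(code, module.__dict__)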
+
+ if (m) {
+ PyObject *object = NULL;
+
+ if (!r || strcmp(r->filename, filename)) {
apr_finfo_t finfo;
if (apr_stat(&finfo, filename, APR_FINFO_NORM,
pool) != APR_SUCCESS) {
@@ -6424,26 +3103,21 @@ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r,
else {
object = PyLong_FromLongLong(finfo.mtime);
}
-#endif
}
else {
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- object = PyLong_FromLongLong(r->finfo.st_mtime);
-#else
object = PyLong_FromLongLong(r->finfo.mtime);
-#endif
}
PyModule_AddObject(m, "__mtime__", object);
}
else {
Py_BEGIN_ALLOW_THREADS
if (r) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Target WSGI script '%s' cannot "
"be loaded as Python module.", getpid(), filename);
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d): Target WSGI script '%s' cannot "
"be loaded as Python module.", getpid(), filename);
}
@@ -6470,15 +3144,6 @@ static int wsgi_reload_required(apr_pool_t *pool, request_rec *r,
mtime = PyLong_AsLongLong(object);
if (!r || strcmp(r->filename, filename)) {
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- struct stat finfo;
- if (stat(filename, &finfo) == -1) {
- return 1;
- }
- else if (mtime != finfo.st_mtime) {
- return 1;
- }
-#else
apr_finfo_t finfo;
if (apr_stat(&finfo, filename, APR_FINFO_NORM,
pool) != APR_SUCCESS) {
@@ -6487,16 +3152,10 @@ static int wsgi_reload_required(apr_pool_t *pool, request_rec *r,
else if (mtime != finfo.mtime) {
return 1;
}
-#endif
}
else {
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- if (mtime != r->finfo.st_mtime)
- return 1;
-#else
if (mtime != r->finfo.mtime)
return 1;
-#endif
}
}
else
@@ -6561,6 +3220,10 @@ static char *wsgi_module_name(apr_pool_t *pool, const char *filename)
return apr_pstrcat(pool, "_mod_wsgi_", hash, NULL);
}
+#if APR_HAS_THREADS
+static apr_thread_mutex_t* wsgi_module_lock = NULL;
+#endif
+
static int wsgi_execute_script(request_rec *r)
{
WSGIRequestConfig *config = NULL;
@@ -6587,22 +3250,13 @@ static int wsgi_execute_script(request_rec *r)
interp = wsgi_acquire_interpreter(config->application_group);
if (!interp) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r,
"mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
getpid(), config->application_group);
return HTTP_INTERNAL_SERVER_ERROR;
}
- /* Calculate the Python module name to be used for script. */
-
- if (config->handler_script && *config->handler_script)
- script = config->handler_script;
- else
- script = r->filename;
-
- name = wsgi_module_name(r->pool, script);
-
/*
* Use a lock around the check to see if the module is
* already loaded and the import of the module to prevent
@@ -6616,71 +3270,120 @@ static int wsgi_execute_script(request_rec *r)
Py_END_ALLOW_THREADS
#endif
- modules = PyImport_GetModuleDict();
- module = PyDict_GetItemString(modules, name);
+ /* Calculate the Python module name to be used for script. */
- Py_XINCREF(module);
+ if (config->handler_script && *config->handler_script) {
+ script = config->handler_script;
- if (module)
- exists = 1;
+#if 0
+ /*
+ * Check for whether a module reference is provided
+ * as opposed to a filesystem path.
+ */
- /*
- * If script reloading is enabled and the module for it has
- * previously been loaded, see if it has been modified since
- * the last time it was accessed. For a handler script will
- * also see if it contains a custom function for determining
- * if a reload should be performed.
- */
+ if (strlen(script) > 2 && script[0] == '(' &&
+ script[strlen(script)-1] == ')') {
+ name = apr_pstrndup(r->pool, script+1, strlen(script)-2);
- if (module && config->script_reloading) {
- if (wsgi_reload_required(r->pool, r, script, module, r->filename)) {
- /*
- * Script file has changed. Discard reference to
- * loaded module and work out what action we are
- * supposed to take. Choices are process reloading
- * and module reloading. Process reloading cannot be
- * performed unless a daemon process is being used.
- */
+ module = PyImport_ImportModule(name);
- Py_DECREF(module);
- module = NULL;
+ if (!module) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "mod_wsgi (pid=%d): Failed to import handler "
+ "via Python module reference %s.", getpid(),
+ script);
+ Py_END_ALLOW_THREADS
-#if defined(MOD_WSGI_WITH_DAEMONS)
- if (*config->process_group) {
+ wsgi_log_python_error(r, NULL, r->filename);
+ }
+ }
+#endif
+ }
+ else
+ script = r->filename;
+
+ if (!module) {
+ name = wsgi_module_name(r->pool, script);
+
+ modules = PyImport_GetModuleDict();
+ module = PyDict_GetItemString(modules, name);
+
+ Py_XINCREF(module);
+
+ if (module)
+ exists = 1;
+
+ /*
+ * If script reloading is enabled and the module for it has
+ * previously been loaded, see if it has been modified since
+ * the last time it was accessed. For a handler script we will
+ * also see if it contains a custom function for determining
+ * if a reload should be performed.
+ */
+
+ if (module && config->script_reloading) {
+ if (wsgi_reload_required(r->pool, r, script, module, r->filename)) {
/*
- * Need to restart the daemon process. We bail
- * out on the request process here, sending back
- * a special response header indicating that
- * process is being restarted and that remote
- * end should abandon connection and attempt to
- * reconnect again. We also need to signal this
- * process so it will actually shutdown. The
- * process supervisor code will ensure that it
- * is restarted.
+ * Script file has changed. Discard reference to
+ * loaded module and work out what action we are
+ * supposed to take. Choices are process reloading
+ * and module reloading. Process reloading cannot be
+ * performed unless a daemon process is being used.
*/
- Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
- "mod_wsgi (pid=%d): Force restart of "
- "process '%s'.", getpid(),
- config->process_group);
- Py_END_ALLOW_THREADS
+ Py_DECREF(module);
+ module = NULL;
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+ if (*config->process_group) {
+ /*
+ * Need to restart the daemon process. We bail
+ * out on the request process here, sending back
+ * a special response header indicating that
+ * process is being restarted and that remote
+ * end should abandon connection and attempt to
+ * reconnect again. We also need to signal this
+ * process so it will actually shutdown. The
+ * process supervisor code will ensure that it
+ * is restarted.
+ */
+
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
+ "mod_wsgi (pid=%d): Force restart of "
+ "process '%s'.", getpid(),
+ config->process_group);
+ Py_END_ALLOW_THREADS
#if APR_HAS_THREADS
- apr_thread_mutex_unlock(wsgi_module_lock);
+ apr_thread_mutex_unlock(wsgi_module_lock);
#endif
- wsgi_release_interpreter(interp);
+ wsgi_release_interpreter(interp);
- r->status = HTTP_INTERNAL_SERVER_ERROR;
- r->status_line = "200 Rejected";
+ r->status = HTTP_INTERNAL_SERVER_ERROR;
+ r->status_line = "200 Rejected";
- wsgi_daemon_shutdown++;
- kill(getpid(), SIGINT);
+ wsgi_daemon_shutdown++;
+ kill(getpid(), SIGINT);
- return OK;
- }
- else {
+ return OK;
+ }
+ else {
+ /*
+ * Need to reload just the script module. Remove
+ * the module from the modules dictionary before
+ * reloading it again. If code is executing
+ * within the module at the time, the callers
+ * reference count on the module should ensure
+ * it isn't actually destroyed until it is
+ * finished.
+ */
+
+ PyDict_DelItemString(modules, name);
+ }
+#else
/*
* Need to reload just the script module. Remove
* the module from the modules dictionary before
@@ -6692,20 +3395,8 @@ static int wsgi_execute_script(request_rec *r)
*/
PyDict_DelItemString(modules, name);
- }
-#else
- /*
- * Need to reload just the script module. Remove
- * the module from the modules dictionary before
- * reloading it again. If code is executing
- * within the module at the time, the callers
- * reference count on the module should ensure
- * it isn't actually destroyed until it is
- * finished.
- */
-
- PyDict_DelItemString(modules, name);
#endif
+ }
}
}
@@ -6753,6 +3444,11 @@ static int wsgi_execute_script(request_rec *r)
}
#endif
+ /* If embedded mode, need to do request count. */
+
+ if (!wsgi_daemon_pool)
+ wsgi_start_request();
+
/* Load module if not already loaded. */
if (!module) {
@@ -6821,16 +3517,14 @@ static int wsgi_execute_script(request_rec *r)
Py_XDECREF(object);
Py_XDECREF(method);
-#if defined(MOD_WSGI_WITH_BUCKETS)
adapter->bb = NULL;
-#endif
}
Py_XDECREF((PyObject *)adapter);
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Target WSGI script '%s' does "
"not contain WSGI application '%s'.",
getpid(), script, config->callable_object);
@@ -6849,6 +3543,11 @@ static int wsgi_execute_script(request_rec *r)
Py_XDECREF(module);
+ /* If embedded mode, need to do request count. */
+
+ if (!wsgi_daemon_pool)
+ wsgi_end_request();
+
wsgi_release_interpreter(interp);
return status;
@@ -6861,11 +3560,7 @@ static int wsgi_execute_script(request_rec *r)
* function to delete interpreter on process shutdown.
*/
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
-static void wsgi_python_child_cleanup(void *data)
-#else
static apr_status_t wsgi_python_child_cleanup(void *data)
-#endif
{
PyObject *interp = NULL;
@@ -6898,7 +3593,7 @@ static apr_status_t wsgi_python_child_cleanup(void *data)
* destroying interpreters we own.
*/
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Destroying interpreters.", getpid());
PyDict_Clear(wsgi_interpreters);
@@ -6937,15 +3632,12 @@ static apr_status_t wsgi_python_child_cleanup(void *data)
if (wsgi_python_initialized)
wsgi_python_term();
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
return APR_SUCCESS;
-#endif
}
static void wsgi_python_child_init(apr_pool_t *p)
{
PyGILState_STATE state;
- PyInterpreterState *interp = NULL;
PyObject *object = NULL;
int thread_id = 0;
@@ -6975,10 +3667,7 @@ static void wsgi_python_child_init(apr_pool_t *p)
PyType_Ready(&Restricted_Type);
PyType_Ready(&Interpreter_Type);
PyType_Ready(&Dispatch_Type);
-
-#if defined(MOD_WSGI_WITH_AAA_HANDLERS)
PyType_Ready(&Auth_Type);
-#endif
/* Initialise Python interpreter instance table and lock. */
@@ -7025,13 +3714,8 @@ static void wsgi_python_child_init(apr_pool_t *p)
/* Register cleanups to performed on process shutdown. */
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- ap_register_cleanup(p, NULL, wsgi_python_child_cleanup,
- ap_null_cleanup);
-#else
apr_pool_cleanup_register(p, NULL, wsgi_python_child_cleanup,
apr_pool_cleanup_null);
-#endif
/* Loop through import scripts for this process and load them. */
@@ -7047,8 +3731,6 @@ static void wsgi_python_child_init(apr_pool_t *p)
entries = (WSGIScriptFile *)scripts->elts;
for (i = 0; i < scripts->nelts; ++i) {
- int l = 0;
-
entry = &entries[i];
if (!strcmp(wsgi_daemon_group, entry->process_group)) {
@@ -7061,7 +3743,7 @@ static void wsgi_python_child_init(apr_pool_t *p)
interp = wsgi_acquire_interpreter(entry->application_group);
if (!interp) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server,
"mod_wsgi (pid=%d): Cannot acquire "
"interpreter '%s'.", getpid(),
entry->application_group);
@@ -7407,6 +4089,7 @@ static const char *wsgi_add_python_warnings(cmd_parms *cmd, void *mconfig,
return NULL;
}
+#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6
static const char *wsgi_set_py3k_warning_flag(cmd_parms *cmd, void *mconfig,
const char *f)
{
@@ -7429,6 +4112,29 @@ static const char *wsgi_set_py3k_warning_flag(cmd_parms *cmd, void *mconfig,
return NULL;
}
+static const char *wsgi_set_dont_write_bytecode(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+
+ if (strcasecmp(f, "Off") == 0)
+ sconfig->dont_write_bytecode = 0;
+ else if (strcasecmp(f, "On") == 0)
+ sconfig->dont_write_bytecode = 1;
+ else
+ return "WSGIDontWriteBytecode must be one of: Off | On";
+
+ return NULL;
+}
+#endif
+
static const char *wsgi_set_python_optimize(cmd_parms *cmd, void *mconfig,
const char *f)
{
@@ -7493,6 +4199,41 @@ static const char *wsgi_set_python_eggs(cmd_parms *cmd, void *mconfig,
return NULL;
}
+static const char *wsgi_set_python_hash_seed(cmd_parms *cmd, void *mconfig,
+ const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ /*
+ * Must check this here because if we don't and it is wrong, the
+ * Python interpreter will check it later and may kill the process.
+ */
+
+ if (f && *f != '\0' && strcmp(f, "random") != 0) {
+ const char *endptr = f;
+ unsigned long seed;
+
+ seed = PyOS_strtoul((char *)f, (char **)&endptr, 10);
+
+ if (*endptr != '\0' || seed > 4294967295UL
+ || (errno == ERANGE && seed == ULONG_MAX))
+ {
+ return "WSGIPythonHashSeed must be \"random\" or an integer "
+ "in range [0; 4294967295]";
+ }
+ }
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+ sconfig->python_hash_seed = f;
+
+ return NULL;
+}
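For reference, the validation above matches what CPython itself accepts for PYTHONHASHSEED: an empty value, the literal "random", or an integer that fits in an unsigned 32-bit value. A minimal Python sketch of the same rule (the function name is illustrative and not part of mod_wsgi; the C code additionally relies on PyOS_strtoul() and ERANGE for the range check):

    def validate_hash_seed(value):
        # Empty value or "random" is passed straight through to Python.
        if value in ("", "random"):
            return value
        seed = int(value)  # non-numeric input raises ValueError here
        if not 0 <= seed <= 4294967295:
            raise ValueError('WSGIPythonHashSeed must be "random" or an '
                             'integer in range [0; 4294967295]')
        return value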
+
static const char *wsgi_set_restrict_embedded(cmd_parms *cmd, void *mconfig,
const char *f)
{
@@ -7698,7 +4439,6 @@ static const char *wsgi_set_callable_object(cmd_parms *cmd, void *mconfig,
static const char *wsgi_add_import_script(cmd_parms *cmd, void *mconfig,
const char *args)
{
- const char *error = NULL;
WSGIScriptFile *object = NULL;
const char *option = NULL;
@@ -8139,6 +4879,7 @@ static const char *wsgi_set_auth_group_script(cmd_parms *cmd, void *mconfig,
return NULL;
}
+#if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER)
static const char *wsgi_set_user_authoritative(cmd_parms *cmd, void *mconfig,
const char *f)
{
@@ -8154,6 +4895,7 @@ static const char *wsgi_set_user_authoritative(cmd_parms *cmd, void *mconfig,
return NULL;
}
+#endif
static const char *wsgi_set_group_authoritative(cmd_parms *cmd, void *mconfig,
const char *f)
@@ -8171,12 +4913,9 @@ static const char *wsgi_set_group_authoritative(cmd_parms *cmd, void *mconfig,
return NULL;
}
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
static const char *wsgi_add_handler_script(cmd_parms *cmd, void *mconfig,
const char *args)
{
- WSGIServerConfig *sconfig = NULL;
- WSGIDirectoryConfig *dconfig = NULL;
WSGIScriptFile *object = NULL;
const char *name = NULL;
@@ -8253,8 +4992,8 @@ static const char *wsgi_add_handler_script(cmd_parms *cmd, void *mconfig,
return NULL;
}
-static const char *wsgi_set_dont_write_bytecode(cmd_parms *cmd, void *mconfig,
- const char *f)
+static const char *wsgi_set_newrelic_config_file(
+ cmd_parms *cmd, void *mconfig, const char *f)
{
const char *error = NULL;
WSGIServerConfig *sconfig = NULL;
@@ -8264,17 +5003,26 @@ static const char *wsgi_set_dont_write_bytecode(cmd_parms *cmd, void *mconfig,
return error;
sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+ sconfig->newrelic_config_file = f;
- if (strcasecmp(f, "Off") == 0)
- sconfig->dont_write_bytecode = 0;
- else if (strcasecmp(f, "On") == 0)
- sconfig->dont_write_bytecode = 1;
- else
- return "WSGIDontWriteBytecode must be one of: Off | On";
+ return NULL;
+}
+
+static const char *wsgi_set_newrelic_environment(
+ cmd_parms *cmd, void *mconfig, const char *f)
+{
+ const char *error = NULL;
+ WSGIServerConfig *sconfig = NULL;
+
+ error = ap_check_cmd_context(cmd, GLOBAL_ONLY);
+ if (error != NULL)
+ return error;
+
+ sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module);
+ sconfig->newrelic_environment = f;
return NULL;
}
-#endif
/* Handler for the translate name phase. */
@@ -8420,7 +5168,7 @@ static void wsgi_log_script_error(request_rec *r, const char *e, const char *n)
message = apr_psprintf(r->pool, "%s: %s", e, n);
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "%s", message);
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "%s", message);
}
static void wsgi_build_environment(request_rec *r)
@@ -8459,24 +5207,17 @@ static void wsgi_build_environment(request_rec *r)
* might change the content and/or headers.
*/
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
if (r->method_number == M_GET && r->header_only &&
r->output_filters->frec->ftype < AP_FTYPE_PROTOCOL)
apr_table_setn(r->subprocess_env, "REQUEST_METHOD", "GET");
-#else
- if (r->method_number == M_GET && r->header_only)
- apr_table_setn(r->subprocess_env, "REQUEST_METHOD", "GET");
-#endif
/* Determine whether connection uses HTTPS protocol. */
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
if (!wsgi_is_https)
wsgi_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
if (wsgi_is_https && wsgi_is_https(r->connection))
apr_table_set(r->subprocess_env, "HTTPS", "1");
-#endif
/*
* If enabled, pass along authorisation headers which Apache
@@ -8570,10 +5311,8 @@ static void wsgi_build_environment(request_rec *r)
apr_table_setn(r->subprocess_env, "mod_wsgi.enable_sendfile",
apr_psprintf(r->pool, "%d", config->enable_sendfile));
-#if defined(MOD_WSGI_WITH_DAEMONS)
apr_table_setn(r->subprocess_env, "mod_wsgi.queue_start",
apr_psprintf(r->pool, "%" APR_TIME_T_FMT, r->request_time));
-#endif
}
typedef struct {
@@ -8687,7 +5426,8 @@ static PyObject *Dispatch_environ(DispatchObject *self, const char *group)
*/
if (!wsgi_daemon_pool && self->config->pass_apache_request) {
-#if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2
+#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || \
+ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 7)
object = PyCapsule_New(self->r, 0, 0);
#else
object = PyCObject_FromVoidPtr(self->r, 0);
@@ -8702,7 +5442,6 @@ static PyObject *Dispatch_environ(DispatchObject *self, const char *group)
*/
#if 0
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https");
PyDict_SetItemString(vars, "mod_ssl.is_https", object);
Py_DECREF(object);
@@ -8711,13 +5450,10 @@ static PyObject *Dispatch_environ(DispatchObject *self, const char *group)
PyDict_SetItemString(vars, "mod_ssl.var_lookup", object);
Py_DECREF(object);
#endif
-#endif
return vars;
}
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
-
static PyObject *Dispatch_ssl_is_https(DispatchObject *self, PyObject *args)
{
APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0;
@@ -8805,13 +5541,9 @@ static PyObject *Dispatch_ssl_var_lookup(DispatchObject *self, PyObject *args)
#endif
}
-#endif
-
static PyMethodDef Dispatch_methods[] = {
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
{ "ssl_is_https", (PyCFunction)Dispatch_ssl_is_https, METH_VARARGS, 0 },
{ "ssl_var_lookup", (PyCFunction)Dispatch_ssl_var_lookup, METH_VARARGS, 0 },
-#endif
{ NULL, NULL}
};
@@ -8880,7 +5612,7 @@ static int wsgi_execute_dispatch(request_rec *r)
&wsgi_module);
if (!config->dispatch_script) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d): Location of WSGI dispatch "
"script not provided.", getpid());
@@ -8898,7 +5630,7 @@ static int wsgi_execute_dispatch(request_rec *r)
interp = wsgi_acquire_interpreter(group);
if (!interp) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r,
"mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
getpid(), group);
@@ -9337,33 +6069,17 @@ static int wsgi_hook_handler(request_rec *r)
/* Ensure target script exists and is a file. */
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- if (r->finfo.st_mode == 0) {
- wsgi_log_script_error(r, "Target WSGI script not found or unable "
- "to stat", r->filename);
- return HTTP_NOT_FOUND;
- }
-#else
if (r->finfo.filetype == 0) {
wsgi_log_script_error(r, "Target WSGI script not found or unable "
"to stat", r->filename);
return HTTP_NOT_FOUND;
}
-#endif
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
- if (S_ISDIR(r->finfo.st_mode)) {
- wsgi_log_script_error(r, "Attempt to invoke directory as WSGI "
- "application", r->filename);
- return HTTP_FORBIDDEN;
- }
-#else
if (r->finfo.filetype == APR_DIR) {
wsgi_log_script_error(r, "Attempt to invoke directory as WSGI "
"application", r->filename);
return HTTP_FORBIDDEN;
}
-#endif
if (wsgi_is_script_aliased(r)) {
/*
@@ -9372,15 +6088,15 @@ static int wsgi_hook_handler(request_rec *r)
* configuration supplied with WSGIScriptAlias directives.
*/
- if (value = apr_table_get(r->notes, "mod_wsgi.process_group"))
+ if ((value = apr_table_get(r->notes, "mod_wsgi.process_group")))
config->process_group = wsgi_process_group(r, value);
- if (value = apr_table_get(r->notes, "mod_wsgi.application_group"))
+ if ((value = apr_table_get(r->notes, "mod_wsgi.application_group")))
config->application_group = wsgi_application_group(r, value);
- if (value = apr_table_get(r->notes, "mod_wsgi.callable_object"))
+ if ((value = apr_table_get(r->notes, "mod_wsgi.callable_object")))
config->callable_object = value;
- if (value = apr_table_get(r->notes,
- "mod_wsgi.pass_authorization")) {
+ if ((value = apr_table_get(r->notes,
+ "mod_wsgi.pass_authorization"))) {
if (!strcmp(value, "1"))
config->pass_authorization = 1;
else
@@ -9388,7 +6104,12 @@ static int wsgi_hook_handler(request_rec *r)
}
}
}
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
+#if 0
+ else if (strstr(r->handler, "wsgi-handler=") == r->handler) {
+ config->handler_script = apr_pstrcat(r->pool, r->handler+13, NULL);
+ config->callable_object = "handle_request";
+ }
+#endif
else if (config->handler_scripts) {
WSGIScriptFile *entry;
@@ -9400,12 +6121,12 @@ static int wsgi_hook_handler(request_rec *r)
config->handler_script = entry->handler_script;
config->callable_object = "handle_request";
- if (value = entry->process_group)
+ if ((value = entry->process_group))
config->process_group = wsgi_process_group(r, value);
- if (value = entry->application_group)
+ if ((value = entry->application_group))
config->application_group = wsgi_application_group(r, value);
- if (value = entry->pass_authorization) {
+ if ((value = entry->pass_authorization)) {
if (!strcmp(value, "1"))
config->pass_authorization = 1;
else
@@ -9415,14 +6136,12 @@ static int wsgi_hook_handler(request_rec *r)
else
return DECLINED;
}
-#endif
else
return DECLINED;
/*
- * For Apache 2.0+ honour AcceptPathInfo directive. Default
- * behaviour is accept additional path information. Under
- * Apache 1.3, WSGI application would need to check itself.
+ * Honour the AcceptPathInfo directive. Default behaviour is
+ * to accept additional path information.
*/
#if AP_MODULE_MAGIC_AT_LEAST(20011212,0)
@@ -9518,195 +6237,6 @@ static int wsgi_hook_handler(request_rec *r)
return wsgi_execute_script(r);
}
-#if AP_SERVER_MAJORVERSION_NUMBER < 2
-
-/*
- * Apache 1.3 module initialisation functions.
- */
-
-static void wsgi_hook_init(server_rec *s, apr_pool_t *p)
-{
- char package[128];
-
- /* Setup module version information. */
-
- sprintf(package, "mod_wsgi/%s", MOD_WSGI_VERSION_STRING);
-
- ap_add_version_component(package);
-
- /* Record Python version string with Apache. */
-
- if (!Py_IsInitialized()) {
- char buffer[256];
- const char *token = NULL;
- const char *version = NULL;
-
- version = Py_GetVersion();
-
- token = version;
- while (*token && *token != ' ')
- token++;
-
- strcpy(buffer, "Python/");
- strncat(buffer, version, token - version);
-
- ap_add_version_component(buffer);
- }
-
- /* Retain reference to base server. */
-
- wsgi_server = s;
-
- /* Retain record of parent process ID. */
-
- wsgi_parent_pid = getpid();
-
- /* Determine whether multiprocess and/or multithreaded. */
-
- wsgi_multiprocess = 1;
- wsgi_multithread = 0;
-
- /* Retain reference to main server config. */
-
- wsgi_server_config = ap_get_module_config(s->module_config, &wsgi_module);
-
- /*
- * Check that the version of Python found at
- * runtime is what was used at compilation.
- */
-
- wsgi_python_version();
-
- /*
- * Initialise Python if required to be done in
- * the parent process. Note that it will not be
- * initialised if mod_python loaded and it has
- * already been done.
- */
-
- if (!wsgi_python_after_fork)
- wsgi_python_init(p);
-}
-
-static void wsgi_hook_child_init(server_rec *s, apr_pool_t *p)
-{
- if (wsgi_python_required) {
- /*
- * Initialise Python if required to be done in
- * the child process. Note that it will not be
- * initialised if mod_python loaded and it has
- * already been done.
- */
-
- if (wsgi_python_after_fork)
- wsgi_python_init(p);
-
- /*
- * Now perform additional initialisation steps
- * always done in child process.
- */
-
- wsgi_python_child_init(p);
- }
-}
-
-/* Dispatch list of content handlers */
-static const handler_rec wsgi_handlers[] = {
- { "wsgi-script", wsgi_hook_handler },
- { "application/x-httpd-wsgi", wsgi_hook_handler },
- { NULL, NULL }
-};
-
-static const command_rec wsgi_commands[] =
-{
- { "WSGIScriptAlias", wsgi_add_script_alias, NULL,
- RSRC_CONF, RAW_ARGS, "Map location to target WSGI script file." },
- { "WSGIScriptAliasMatch", wsgi_add_script_alias, "*",
- RSRC_CONF, RAW_ARGS, "Map location to target WSGI script file." },
-
- { "WSGIVerboseDebugging", wsgi_set_verbose_debugging, NULL,
- RSRC_CONF, TAKE1, "Enable/Disable verbose debugging messages." },
-
-#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6
- { "WSGIPy3kWarningFlag", wsgi_set_py3k_warning_flag, NULL,
- RSRC_CONF, TAKE1, "Enable/Disable Python 3.0 warnings." },
- { "WSGIDontWriteBytecode", wsgi_set_dont_write_bytecode, NULL,
- RSRC_CONF, TAKE1, "Enable/Disable writing of byte code." },
-#endif
-
- { "WSGIPythonWarnings", wsgi_add_python_warnings, NULL,
- RSRC_CONF, TAKE1, "Control Python warning messages." },
- { "WSGIPythonOptimize", wsgi_set_python_optimize, NULL,
- RSRC_CONF, TAKE1, "Set level of Python compiler optimisations." },
- { "WSGIPythonHome", wsgi_set_python_home, NULL,
- RSRC_CONF, TAKE1, "Python prefix/exec_prefix absolute path names." },
- { "WSGIPythonPath", wsgi_set_python_path, NULL,
- RSRC_CONF, TAKE1, "Python module search path." },
- { "WSGIPythonEggs", wsgi_set_python_eggs, NULL,
- RSRC_CONF, TAKE1, "Python eggs cache directory." },
-
- { "WSGIRestrictStdin", wsgi_set_restrict_stdin, NULL,
- RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of STDIN." },
- { "WSGIRestrictStdout", wsgi_set_restrict_stdout, NULL,
- RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of STDOUT." },
- { "WSGIRestrictSignal", wsgi_set_restrict_signal, NULL,
- RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of signal()." },
-
- { "WSGICaseSensitivity", wsgi_set_case_sensitivity, NULL,
- RSRC_CONF, TAKE1, "Define whether file system is case sensitive." },
-
- { "WSGIApplicationGroup", wsgi_set_application_group, NULL,
- ACCESS_CONF|RSRC_CONF, TAKE1, "Application interpreter group." },
- { "WSGICallableObject", wsgi_set_callable_object, NULL,
- OR_FILEINFO, TAKE1, "Name of entry point in WSGI script file." },
-
- { "WSGIImportScript", wsgi_add_import_script, NULL,
- RSRC_CONF, RAW_ARGS, "Location of WSGI import script." },
- { "WSGIDispatchScript", wsgi_set_dispatch_script, NULL,
- ACCESS_CONF|RSRC_CONF, RAW_ARGS, "Location of WSGI dispatch script." },
-
- { "WSGIPassAuthorization", wsgi_set_pass_authorization, NULL,
- OR_FILEINFO, TAKE1, "Enable/Disable WSGI authorization." },
- { "WSGIScriptReloading", wsgi_set_script_reloading, NULL,
- OR_FILEINFO, TAKE1, "Enable/Disable script reloading mechanism." },
- { "WSGIChunkedRequest", wsgi_set_chunked_request, NULL,
- OR_FILEINFO, TAKE1, "Enable/Disable support for chunked request." },
-
- { NULL }
-};
-
-/* Dispatch list for API hooks */
-
-module MODULE_VAR_EXPORT wsgi_module = {
- STANDARD_MODULE_STUFF,
- wsgi_hook_init, /* module initializer */
- wsgi_create_dir_config, /* create per-dir config structures */
- wsgi_merge_dir_config, /* merge per-dir config structures */
- wsgi_create_server_config, /* create per-server config structures */
- wsgi_merge_server_config, /* merge per-server config structures */
- wsgi_commands, /* table of config file commands */
- wsgi_handlers, /* [#8] MIME-typed-dispatched handlers */
- wsgi_hook_intercept, /* [#1] URI to filename translation */
- NULL, /* [#4] validate user id from request */
- NULL, /* [#5] check if the user is ok _here_ */
- NULL, /* [#3] check access by host address */
- NULL, /* [#6] determine MIME type */
- NULL, /* [#7] pre-run fixups */
- NULL, /* [#9] log a transaction */
- NULL, /* [#2] header parser */
- wsgi_hook_child_init, /* child_init */
- NULL, /* child_exit */
- NULL /* [#0] post read-request */
-#ifdef EAPI
- ,NULL, /* EAPI: add_module */
- NULL, /* EAPI: remove_module */
- NULL, /* EAPI: rewrite_command */
- NULL /* EAPI: new_connection */
-#endif
-};
-
-#else
-
/*
* Apache 2.X and UNIX specific code for creation and management
* of distinct daemon processes.
@@ -9738,9 +6268,14 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
int stack_size = 0;
int maximum_requests = 0;
+ int blocked_requests = 0;
int shutdown_timeout = 5;
int deadlock_timeout = 300;
int inactivity_timeout = 0;
+ int blocked_timeout = 0;
+ int graceful_timeout = 0;
+
+ int listen_backlog = WSGI_LISTEN_BACKLOG;
const char *display_name = NULL;
@@ -9763,6 +6298,9 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
int groups_count = 0;
gid_t *groups = NULL;
+ const char *newrelic_config_file = NULL;
+ const char *newrelic_environment = NULL;
+
const char *option = NULL;
const char *value = NULL;
@@ -9899,6 +6437,14 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
if (maximum_requests < 0)
return "Invalid request count for WSGI daemon process.";
}
+ else if (!strcmp(option, "blocked-requests")) {
+ if (!*value)
+ return "Invalid blocked count for WSGI daemon process.";
+
+ blocked_requests = atoi(value);
+ if (blocked_requests < 0)
+ return "Invalid blocked count for WSGI daemon process.";
+ }
else if (!strcmp(option, "shutdown-timeout")) {
if (!*value)
return "Invalid shutdown timeout for WSGI daemon process.";
@@ -9923,6 +6469,30 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
if (inactivity_timeout < 0)
return "Invalid inactivity timeout for WSGI daemon process.";
}
+ else if (!strcmp(option, "blocked-timeout")) {
+ if (!*value)
+ return "Invalid process timeout for WSGI daemon process.";
+
+ blocked_timeout = atoi(value);
+ if (blocked_timeout < 0)
+ return "Invalid process timeout for WSGI daemon process.";
+ }
+ else if (!strcmp(option, "graceful-timeout")) {
+ if (!*value)
+ return "Invalid graceful timeout for WSGI daemon process.";
+
+ graceful_timeout = atoi(value);
+ if (graceful_timeout < 0)
+ return "Invalid graceful timeout for WSGI daemon process.";
+ }
+ else if (!strcmp(option, "listen-backlog")) {
+ if (!*value)
+ return "Invalid listen backlog for WSGI daemon process.";
+
+ listen_backlog = atoi(value);
+ if (listen_backlog < 0)
+ return "Invalid listen backlog for WSGI daemon process.";
+ }
else if (!strcmp(option, "display-name")) {
display_name = value;
}
@@ -10014,6 +6584,12 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
if (virtual_memory_limit < 0)
return "Invalid virtual memory limit for WSGI daemon process.";
}
+ else if (!strcmp(option, "newrelic-config-file")) {
+ newrelic_config_file = value;
+ }
+ else if (!strcmp(option, "newrelic-environment")) {
+ newrelic_environment = value;
+ }
else
return "Invalid option to WSGI daemon process definition.";
}
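Taken together, the hunks above extend the WSGIDaemonProcess option parser with blocked-requests, blocked-timeout, graceful-timeout, listen-backlog, newrelic-config-file and newrelic-environment. In a configuration these appear as extra key=value options on the directive, for example (the group name and values are illustrative only):

    WSGIDaemonProcess example threads=15 maximum-requests=1000 \
        graceful-timeout=30 listen-backlog=100 blocked-timeout=60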
@@ -10062,6 +6638,9 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
return "Name duplicates previous WSGI daemon definition.";
}
+ if (blocked_requests == 0 || blocked_requests > threads)
+ blocked_requests = threads;
+
wsgi_daemon_count++;
entry = (WSGIProcessGroup *)apr_array_push(wsgi_daemon_list);
@@ -10099,9 +6678,14 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
entry->stack_size = stack_size;
entry->maximum_requests = maximum_requests;
+ entry->blocked_requests = blocked_requests;
entry->shutdown_timeout = shutdown_timeout;
entry->deadlock_timeout = apr_time_from_sec(deadlock_timeout);
entry->inactivity_timeout = apr_time_from_sec(inactivity_timeout);
+ entry->blocked_timeout = apr_time_from_sec(blocked_timeout);
+ entry->graceful_timeout = apr_time_from_sec(graceful_timeout);
+
+ entry->listen_backlog = listen_backlog;
entry->display_name = display_name;
@@ -10117,6 +6701,9 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig,
entry->memory_limit = memory_limit;
entry->virtual_memory_limit = virtual_memory_limit;
+ entry->newrelic_config_file = newrelic_config_file;
+ entry->newrelic_environment = newrelic_environment;
+
entry->listener_fd = -1;
return NULL;
@@ -10194,7 +6781,7 @@ static const char *wsgi_set_accept_mutex(cmd_parms *cmd, void *mconfig,
sconfig->lock_mechanism = APR_LOCK_FCNTL;
}
#endif
-#if APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM)
+#if APR_HAS_SYSVSEM_SERIALIZE
else if (!strcasecmp(arg, "sysvsem")) {
sconfig->lock_mechanism = APR_LOCK_SYSVSEM;
}
@@ -10221,19 +6808,27 @@ static const char *wsgi_set_accept_mutex(cmd_parms *cmd, void *mconfig,
static apr_file_t *wsgi_signal_pipe_in = NULL;
static apr_file_t *wsgi_signal_pipe_out = NULL;
-static int wsgi_cpu_time_limit_exceeded = 0;
-
static void wsgi_signal_handler(int signum)
{
apr_size_t nbytes = 1;
- if (signum == SIGXCPU)
- wsgi_cpu_time_limit_exceeded = 1;
+ if (signum == AP_SIG_GRACEFUL) {
+ apr_file_write(wsgi_signal_pipe_out, "G", &nbytes);
+ apr_file_flush(wsgi_signal_pipe_out);
+ }
+ else if (signum == SIGXCPU) {
+ if (!wsgi_graceful_timeout)
+ wsgi_daemon_shutdown++;
- apr_file_write(wsgi_signal_pipe_out, "X", &nbytes);
- apr_file_flush(wsgi_signal_pipe_out);
+ apr_file_write(wsgi_signal_pipe_out, "C", &nbytes);
+ apr_file_flush(wsgi_signal_pipe_out);
+ }
+ else {
+ wsgi_daemon_shutdown++;
- wsgi_daemon_shutdown++;
+ apr_file_write(wsgi_signal_pipe_out, "S", &nbytes);
+ apr_file_flush(wsgi_signal_pipe_out);
+ }
}
static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon);
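The reworked signal handler above no longer does any real work in signal context: it writes a single byte down a pipe, 'G' for a graceful-restart signal, 'C' for SIGXCPU and 'S' for an ordinary shutdown, and lets the main loop pick the byte up later. A stripped-down Python illustration of that self-pipe pattern (the signal choices and names are illustrative, not mod_wsgi's own):

    import os
    import signal

    read_fd, write_fd = os.pipe()

    def wakeup(signum, frame):
        # Keep the handler tiny: just record why we were woken as one byte.
        if signum == signal.SIGUSR1:
            os.write(write_fd, b"G")   # graceful restart requested
        elif signum == signal.SIGXCPU:
            os.write(write_fd, b"C")   # CPU time limit exceeded
        else:
            os.write(write_fd, b"S")   # ordinary shutdown

    for signum in (signal.SIGTERM, signal.SIGINT, signal.SIGXCPU,
                   signal.SIGUSR1):
        signal.signal(signum, wakeup)

    # The daemon's main loop polls read_fd alongside its listener socket
    # and reacts outside of signal context.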
@@ -10250,17 +6845,14 @@ static void wsgi_manage_process(int reason, void *data, apr_wait_t status)
int mpm_state;
int stopping;
- /* Stop watching the existing process. */
-
- apr_proc_other_child_unregister(daemon);
-
/*
* Determine if Apache is being shutdown or not and
- * if it is not being shutdown, restart the child
- * daemon process that has died. If MPM doesn't
- * support query assume that child daemon process
- * shouldn't be restarted. Both prefork and worker
- * MPMs support this query so should always be okay.
+ * if it is not being shutdown, we will need to
+ * restart the child daemon process that has died.
+ * If MPM doesn't support query assume that child
+ * daemon process shouldn't be restarted. Both
+ * prefork and worker MPMs support this query so
+ * should always be okay.
*/
stopping = 1;
@@ -10271,13 +6863,43 @@ static void wsgi_manage_process(int reason, void *data, apr_wait_t status)
}
if (!stopping) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0),
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Process '%s' has died, deregister and "
+ "restart it.", daemon->process.pid,
+ daemon->group->name);
+
+ if (WIFEXITED(status)) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Process '%s' terminated normally, exit code %d",
+ daemon->process.pid, daemon->group->name,
+ WEXITSTATUS(status));
+ }
+ else if (WIFSIGNALED(status)) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
- "Process '%s' has died, restarting.",
+ "Process '%s' terminated by signal %d",
+ daemon->process.pid, daemon->group->name,
+ WTERMSIG(status));
+ }
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Process '%s' has died but server is "
+ "being stopped, deregister it.",
daemon->process.pid, daemon->group->name);
+ }
+
+ /* Deregister existing process so we stop watching it. */
+ apr_proc_other_child_unregister(daemon);
+
+ /* Now restart process if not shutting down. */
+
+ if (!stopping)
wsgi_start_process(wsgi_parent_pool, daemon);
- }
break;
}
@@ -10286,7 +6908,13 @@ static void wsgi_manage_process(int reason, void *data, apr_wait_t status)
case APR_OC_REASON_RESTART: {
- /* Stop watching the existing process. */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Process '%s' to be deregistered, as server is "
+ "restarting or being shutdown.",
+ daemon->process.pid, daemon->group->name);
+
+ /* Deregister existing process so we stop watching it. */
apr_proc_other_child_unregister(daemon);
@@ -10297,17 +6925,18 @@ static void wsgi_manage_process(int reason, void *data, apr_wait_t status)
case APR_OC_REASON_LOST: {
- /* Stop watching the existing process. */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Process '%s' appears to have been lost, "
+ "deregister and restart it.",
+ daemon->process.pid, daemon->group->name);
+
+ /* Deregister existing process so we stop watching it. */
apr_proc_other_child_unregister(daemon);
/* Restart the child daemon process that has died. */
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0),
- wsgi_server, "mod_wsgi (pid=%d): "
- "Process '%s' has died, restarting.",
- daemon->process.pid, daemon->group->name);
-
wsgi_start_process(wsgi_parent_pool, daemon);
break;
@@ -10319,8 +6948,21 @@ static void wsgi_manage_process(int reason, void *data, apr_wait_t status)
/* Nothing to do at present. */
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Process '%s' has been deregistered and will "
+ "no longer be monitored.", daemon->process.pid,
+ daemon->group->name);
+
break;
}
+
+ default: {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Process '%s' targeted by unexpected event %d.",
+ daemon->process.pid, daemon->group->name, reason);
+ }
}
}
@@ -10380,7 +7022,7 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
if (daemon->group->root) {
if (chroot(daemon->group->root) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change root "
"directory to '%s'.", getpid(), daemon->group->root);
}
@@ -10390,7 +7032,7 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
if (daemon->group->home) {
if (chdir(daemon->group->home) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change working "
"directory to '%s'.", getpid(), daemon->group->home);
}
@@ -10402,13 +7044,13 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
if (pwent) {
if (chdir(pwent->pw_dir) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change working "
"directory to '%s'.", getpid(), pwent->pw_dir);
}
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to determine home "
"directory for uid=%ld.", getpid(), (long)geteuid());
}
@@ -10420,13 +7062,13 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
if (pwent) {
if (chdir(pwent->pw_dir) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to change working "
"directory to '%s'.", getpid(), pwent->pw_dir);
}
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to determine home "
"directory for uid=%ld.", getpid(),
(long)daemon->group->uid);
@@ -10441,7 +7083,7 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
/* Setup the daemon process real and effective group. */
if (setgid(daemon->group->gid) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Unable to set group id to gid=%u.",
getpid(), (unsigned)daemon->group->gid);
}
@@ -10449,7 +7091,7 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
if (daemon->group->groups) {
if (setgroups(daemon->group->groups_count,
daemon->group->groups) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno),
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno,
wsgi_server, "mod_wsgi (pid=%d): Unable "
"to set supplementary groups for uname=%s "
"of '%s'.", getpid(), daemon->group->user,
@@ -10457,7 +7099,7 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
}
}
else if (initgroups(daemon->group->user, daemon->group->gid) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno),
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno,
wsgi_server, "mod_wsgi (pid=%d): Unable "
"to set groups for uname=%s and gid=%u.", getpid(),
daemon->group->user, (unsigned)daemon->group->gid);
@@ -10471,6 +7113,23 @@ static void wsgi_setup_access(WSGIDaemonProcess *daemon)
"mod_wsgi (pid=%d): Unable to change to uid=%ld.",
getpid(), (long)daemon->group->uid);
}
+
+ /*
+ * Linux prevents generation of core dumps after setuid()
+ * has been used. Attempt to reenable the ability to dump core
+ * so that the CoreDumpDirectory directive still works.
+ */
+
+#if defined(HAVE_PRCTL) && defined(PR_SET_DUMPABLE)
+ /* This applies to Linux 2.4 and later. */
+ if (ap_coredumpdir_configured) {
+ if (prctl(PR_SET_DUMPABLE, 1)) {
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
+ "mod_wsgi (pid=%d): Set dumpable failed. This child "
+ "will not coredump after software errors.", getpid());
+ }
+ }
+#endif
}
static int wsgi_setup_socket(WSGIProcessGroup *process)
@@ -10483,12 +7142,12 @@ static int wsgi_setup_socket(WSGIProcessGroup *process)
int sendsz = process->send_buffer_size;
int recvsz = process->recv_buffer_size;
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Socket for '%s' is '%s'.",
getpid(), process->name, process->socket);
if ((sockfd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create unix domain "
"socket.", getpid());
return -1;
@@ -10498,7 +7157,7 @@ static int wsgi_setup_socket(WSGIProcessGroup *process)
if (sendsz) {
if (setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
(void *)&sendsz, sizeof(sendsz)) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_WARNING, errno, wsgi_server,
"mod_wsgi (pid=%d): Failed to set send buffer "
"size on daemon process socket.", getpid());
}
@@ -10508,7 +7167,7 @@ static int wsgi_setup_socket(WSGIProcessGroup *process)
if (recvsz) {
if (setsockopt(sockfd, SOL_SOCKET, SO_RCVBUF,
(void *)&recvsz, sizeof(recvsz)) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_WARNING, errno, wsgi_server,
"mod_wsgi (pid=%d): Failed to set receive buffer "
"size on daemon process socket.", getpid());
}
@@ -10523,7 +7182,7 @@ static int wsgi_setup_socket(WSGIProcessGroup *process)
rc = bind(sockfd, (struct sockaddr *)&addr, sizeof(addr));
if (rc < 0 && errno == EADDRINUSE) {
- ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_WARNING, errno, wsgi_server,
"mod_wsgi (pid=%d): Removing stale unix domain "
"socket '%s'.", getpid(), process->socket);
@@ -10535,14 +7194,18 @@ static int wsgi_setup_socket(WSGIProcessGroup *process)
umask(omask);
if (rc < 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Couldn't bind unix domain "
"socket '%s'.", getpid(), process->socket);
return -1;
}
- if (listen(sockfd, WSGI_LISTEN_BACKLOG) < 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Listen backlog for socket '%s' is '%d'.",
+ getpid(), process->socket, process->listen_backlog);
+
+ if (listen(sockfd, process->listen_backlog) < 0) {
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Couldn't listen on unix domain "
"socket.", getpid());
return -1;
@@ -10570,7 +7233,7 @@ static int wsgi_setup_socket(WSGIProcessGroup *process)
#else
if (chown(process->socket, ap_unixd_config.user_id, -1) < 0) {
#endif
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi (pid=%d): Couldn't change owner of unix "
"domain socket '%s'.", getpid(),
process->socket);
@@ -10600,7 +7263,13 @@ static void wsgi_process_socket(apr_pool_t *p, apr_socket_t *sock,
* will add their own input/output filters to the chain.
*/
+#if AP_MODULE_MAGIC_AT_LEAST(20110619,0)
+ /* For 2.4 a NULL sbh pointer should work. */
+ sbh = NULL;
+#else
+ /* For 2.2 a dummy sbh pointer is needed. */
ap_create_sb_handle(&sbh, p, -1, 0);
+#endif
c = (conn_rec *)apr_pcalloc(p, sizeof(conn_rec));
@@ -10612,7 +7281,7 @@ static void wsgi_process_socket(apr_pool_t *p, apr_socket_t *sock,
if ((rv = apr_socket_addr_get(&c->local_addr, APR_LOCAL, sock))
!= APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, rv, wsgi_server,
"mod_wsgi (pid=%d): Failed call "
"apr_socket_addr_get(APR_LOCAL).", getpid());
apr_socket_close(sock);
@@ -10633,7 +7302,7 @@ static void wsgi_process_socket(apr_pool_t *p, apr_socket_t *sock,
#else
if ((rv = apr_socket_addr_get(&c->remote_addr, APR_REMOTE, sock))
!= APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, rv, wsgi_server,
"mod_wsgi (pid=%d): Failed call "
"apr_socket_addr_get(APR_REMOTE).", getpid());
apr_socket_close(sock);
@@ -10651,7 +7320,7 @@ static void wsgi_process_socket(apr_pool_t *p, apr_socket_t *sock,
rv = apr_socket_timeout_set(sock, c->base_server->timeout);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, wsgi_server,
"mod_wsgi (pid=%d): Failed call "
"apr_socket_timeout_set().", getpid());
}
@@ -10710,7 +7379,7 @@ static apr_status_t wsgi_worker_acquire(int id)
rv = apr_thread_cond_wait(thread->condition, thread->mutex);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv),
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv,
wsgi_server, "mod_wsgi (pid=%d): "
"Wait on thread %d wakeup condition variable "
"failed.", getpid(), id);
@@ -10723,7 +7392,7 @@ static apr_status_t wsgi_worker_acquire(int id)
}
}
-static apr_status_t wsgi_worker_release()
+static apr_status_t wsgi_worker_release(void)
{
WSGIThreadStack *stack = wsgi_worker_stack;
@@ -10772,7 +7441,7 @@ static apr_status_t wsgi_worker_release()
}
}
-static apr_status_t wsgi_worker_shutdown()
+static apr_status_t wsgi_worker_shutdown(void)
{
int i;
apr_status_t rv;
@@ -10815,9 +7484,6 @@ static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
while (!wsgi_daemon_shutdown) {
apr_status_t rv;
- apr_time_t start;
- apr_time_t duration;
-
/*
* Only allow one thread in this process to attempt to
* acquire the global process lock as the global process
@@ -10843,6 +7509,7 @@ static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
rv = apr_proc_mutex_lock(group->mutex);
if (rv != APR_SUCCESS) {
+#if 0
#if defined(EIDRM)
/*
* When using multiple threads locking the
@@ -10866,14 +7533,16 @@ static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
wsgi_daemon_shutdown = 1;
}
#endif
+#endif
if (!wsgi_daemon_shutdown) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv),
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv,
wsgi_server, "mod_wsgi (pid=%d): "
"Couldn't acquire accept mutex '%s'. "
"Shutting down daemon process.",
getpid(), group->socket);
+ wsgi_daemon_shutdown++;
kill(getpid(), SIGTERM);
sleep(5);
}
@@ -10919,7 +7588,7 @@ static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv),
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv,
wsgi_server, "mod_wsgi (pid=%d): "
"Unable to poll daemon socket for '%s'. "
"Shutting down daemon process.",
@@ -10965,7 +7634,7 @@ static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
if (!wsgi_daemon_shutdown) {
wsgi_worker_release();
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv),
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv,
wsgi_server, "mod_wsgi (pid=%d): "
"Couldn't release accept mutex '%s'.",
getpid(), group->socket);
@@ -10989,6 +7658,8 @@ static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
/* Process the request proxied from the child process. */
+ wsgi_start_request();
+
bucket_alloc = apr_bucket_alloc_create(ptrans);
wsgi_process_socket(ptrans, socket, bucket_alloc, daemon);
@@ -11000,20 +7671,59 @@ static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread)
/* Check to see if maximum number of requests reached. */
+ wsgi_end_request();
+
if (daemon->group->maximum_requests) {
if (--wsgi_request_count <= 0) {
- if (!wsgi_daemon_shutdown) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ if (wsgi_graceful_timeout && wsgi_active_requests) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Maximum requests "
- "reached '%s'.", getpid(),
- daemon->group->name);
+ "reached, attempt a graceful shutdown "
+ "'%s'.", getpid(), daemon->group->name);
+
+ apr_thread_mutex_lock(wsgi_monitor_lock);
+ wsgi_graceful_shutdown_time = apr_time_now();
+ wsgi_graceful_shutdown_time += wsgi_graceful_timeout;
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
}
+ else {
+ if (!wsgi_daemon_shutdown) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Maximum requests "
+ "reached, triggering immediate shutdown "
+ "'%s'.", getpid(), daemon->group->name);
+ }
+
+ wsgi_daemon_shutdown++;
+ kill(getpid(), SIGINT);
+ }
+ }
+ }
+ else if (wsgi_daemon_graceful && !wsgi_daemon_shutdown) {
+ if (wsgi_active_requests == 0) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Requests have completed, "
+ "triggering immediate shutdown '%s'.",
+ getpid(), daemon->group->name);
wsgi_daemon_shutdown++;
kill(getpid(), SIGINT);
}
}
}
+
+ if (wsgi_server_config->verbose_debugging) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exiting thread %d in daemon "
+ "process '%s'.", getpid(), thread->id,
+ thread->process->group->name);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exiting thread %d in daemon "
+ "process '%s'.", getpid(), thread->id,
+ thread->process->group->name);
+ }
}
static void *wsgi_daemon_thread(apr_thread_t *thd, void *data)
@@ -11021,6 +7731,19 @@ static void *wsgi_daemon_thread(apr_thread_t *thd, void *data)
WSGIDaemonThread *thread = data;
apr_pool_t *p = apr_thread_pool_get(thd);
+ if (wsgi_server_config->verbose_debugging) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Started thread %d in daemon "
+ "process '%s'.", getpid(), thread->id,
+ thread->process->group->name);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Started thread %d in daemon "
+ "process '%s'.", getpid(), thread->id,
+ thread->process->group->name);
+ }
+
apr_thread_mutex_lock(thread->mutex);
wsgi_daemon_worker(p, thread);
@@ -11036,7 +7759,7 @@ static void *wsgi_reaper_thread(apr_thread_t *thd, void *data)
sleep(daemon->group->shutdown_timeout);
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Aborting process '%s'.",
getpid(), daemon->group->name);
@@ -11052,15 +7775,15 @@ static void *wsgi_deadlock_thread(apr_thread_t *thd, void *data)
PyGILState_STATE gilstate;
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Enable deadlock thread in "
"process '%s'.", getpid(), daemon->group->name);
}
- apr_thread_mutex_lock(wsgi_shutdown_lock);
+ apr_thread_mutex_lock(wsgi_monitor_lock);
wsgi_deadlock_shutdown_time = apr_time_now();
wsgi_deadlock_shutdown_time += wsgi_deadlock_timeout;
- apr_thread_mutex_unlock(wsgi_shutdown_lock);
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
while (1) {
apr_sleep(apr_time_from_sec(1));
@@ -11068,10 +7791,10 @@ static void *wsgi_deadlock_thread(apr_thread_t *thd, void *data)
gilstate = PyGILState_Ensure();
PyGILState_Release(gilstate);
- apr_thread_mutex_lock(wsgi_shutdown_lock);
+ apr_thread_mutex_lock(wsgi_monitor_lock);
wsgi_deadlock_shutdown_time = apr_time_now();
wsgi_deadlock_shutdown_time += wsgi_deadlock_timeout;
- apr_thread_mutex_unlock(wsgi_shutdown_lock);
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
}
return NULL;
@@ -11080,44 +7803,55 @@ static void *wsgi_deadlock_thread(apr_thread_t *thd, void *data)
static void *wsgi_monitor_thread(apr_thread_t *thd, void *data)
{
WSGIDaemonProcess *daemon = data;
+ WSGIProcessGroup *group = daemon->group;
int restart = 0;
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Enable monitor thread in "
- "process '%s'.", getpid(), daemon->group->name);
+ "process '%s'.", getpid(), group->name);
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Deadlock timeout is %d.",
getpid(), (int)(apr_time_sec(wsgi_deadlock_timeout)));
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
- "mod_wsgi (pid=%d): Inactivity timeout is %d.",
- getpid(), (int)(apr_time_sec(wsgi_inactivity_timeout)));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Idle inactivity timeout is %d.",
+ getpid(), (int)(apr_time_sec(wsgi_idle_timeout)));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Busy inactivity timeout is %d.",
+ getpid(), (int)(apr_time_sec(wsgi_busy_timeout)));
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Graceful timeout is %d.",
+ getpid(), (int)(apr_time_sec(wsgi_graceful_timeout)));
}
while (1) {
apr_time_t now;
apr_time_t deadlock_time;
- apr_time_t inactivity_time;
+ apr_time_t idle_time;
+ apr_time_t busy_time;
+ apr_time_t graceful_time;
apr_interval_time_t period = 0;
now = apr_time_now();
- apr_thread_mutex_lock(wsgi_shutdown_lock);
+ apr_thread_mutex_lock(wsgi_monitor_lock);
deadlock_time = wsgi_deadlock_shutdown_time;
- inactivity_time = wsgi_inactivity_shutdown_time;
- apr_thread_mutex_unlock(wsgi_shutdown_lock);
+ idle_time = wsgi_idle_shutdown_time;
+ busy_time = wsgi_busy_shutdown_time;
+ graceful_time = wsgi_graceful_shutdown_time;
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
if (!restart && wsgi_deadlock_timeout) {
if (deadlock_time) {
if (deadlock_time <= now) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Daemon process deadlock "
"timer expired, stopping process '%s'.",
- getpid(), daemon->group->name);
+ getpid(), group->name);
restart = 1;
}
@@ -11130,25 +7864,92 @@ static void *wsgi_monitor_thread(apr_thread_t *thd, void *data)
}
}
- if (!restart && wsgi_inactivity_timeout) {
- if (inactivity_time) {
- if (inactivity_time <= now) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ if (!restart && wsgi_idle_timeout) {
+ if (idle_time) {
+ if (idle_time <= now) {
+ if (wsgi_active_requests == 0) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Daemon process "
+ "idle inactivity timer expired, "
+ "stopping process '%s'.", getpid(),
+ group->name);
+
+ restart = 1;
+ }
+ else {
+ /* Ignore for now as still have requests. */
+
+ if (!period || (wsgi_idle_timeout < period))
+ period = wsgi_idle_timeout;
+ }
+ }
+ else {
+ if (!period || ((idle_time - now) < period))
+ period = idle_time - now;
+ else if (wsgi_busy_timeout < period)
+ period = wsgi_busy_timeout;
+ }
+ }
+ else {
+ if (!period || (wsgi_idle_timeout < period))
+ period = wsgi_idle_timeout;
+ }
+ }
+
+ if (!restart && wsgi_busy_timeout) {
+ if (busy_time) {
+ if (busy_time <= now) {
+ if (wsgi_active_requests >= group->blocked_requests) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Daemon process "
+ "busy inactivity timer expired, "
+ "stopping process '%s'.", getpid(),
+ group->name);
+
+ wsgi_dump_blocked_requests = 1;
+
+ restart = 1;
+ }
+ else {
+ /* Ignore for now as not at limit of requests. */
+
+ if (!period || (wsgi_busy_timeout < period))
+ period = wsgi_busy_timeout;
+ }
+ }
+ else {
+ if (!period || ((busy_time - now) < period))
+ period = busy_time - now;
+ else if (wsgi_busy_timeout < period)
+ period = wsgi_busy_timeout;
+ }
+ }
+ else {
+ if (!period || (wsgi_busy_timeout < period))
+ period = wsgi_busy_timeout;
+ }
+ }
+
+ if (!restart && wsgi_graceful_timeout) {
+ if (graceful_time) {
+ if (graceful_time <= now) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Daemon process "
- "inactivity timer expired, stopping "
- "process '%s'.", getpid(),
- daemon->group->name);
+ "graceful timer expired '%s'.", getpid(),
+ group->name);
restart = 1;
}
else {
- if (!period || ((inactivity_time - now) < period))
- period = inactivity_time - now;
+ if (!period || ((graceful_time - now) < period))
+ period = graceful_time - now;
+ else if (wsgi_graceful_timeout < period)
+ period = wsgi_graceful_timeout;
}
}
else {
- if (!period || (wsgi_inactivity_timeout < period))
- period = wsgi_inactivity_timeout;
+ if (!period || (wsgi_graceful_timeout < period))
+ period = wsgi_graceful_timeout;
}
}
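
Each timeout block above has the same shape: when the timer has a pending deadline that has already passed it triggers a restart (or keeps waiting), otherwise it narrows 'period' so the monitor thread sleeps only until the earliest pending deadline. A minimal sketch of that narrowing step in isolation, with hypothetical names and the secondary clamping branch left out:

static apr_interval_time_t example_narrow_period(apr_interval_time_t period,
                                                 apr_time_t deadline,
                                                 apr_interval_time_t timeout,
                                                 apr_time_t now)
{
    if (deadline) {
        /* Sleep no longer than the time remaining until this deadline. */
        if (!period || (deadline - now) < period)
            period = deadline - now;
    }
    else {
        /* No deadline armed yet; bound the sleep by the configured timeout. */
        if (!period || timeout < period)
            period = timeout;
    }

    return period;
}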
@@ -11166,6 +7967,119 @@ static void *wsgi_monitor_thread(apr_thread_t *thd, void *data)
return NULL;
}
+static void wsgi_log_stack_traces(void)
+{
+ PyGILState_STATE state;
+
+ PyObject *threads = NULL;
+
+ /*
+ * This should only be called on shutdown so don't try and log
+ * any errors, just dump them straight out.
+ */
+
+ state = PyGILState_Ensure();
+
+ threads = _PyThread_CurrentFrames();
+
+ if (threads && PyDict_Size(threads) != 0) {
+ PyObject *seq = NULL;
+
+ seq = PyObject_GetIter(threads);
+
+ if (seq) {
+ PyObject *id = NULL;
+ PyObject *frame = NULL;
+
+ Py_ssize_t i = 0;
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Dumping stack trace for "
+ "active Python threads.", getpid());
+
+ while (PyDict_Next(threads, &i, &id, &frame)) {
+ long thread_id = 0;
+
+ PyFrameObject *current = NULL;
+
+ thread_id = PyLong_AsLong(id);
+
+ current = (PyFrameObject *)frame;
+
+ while (current) {
+ int lineno;
+
+ PyObject *filename = NULL;
+ PyObject *name = NULL;
+
+ lineno = current->f_lineno;
+
+#if PY_MAJOR_VERSION > 3
+ filename = PyUnicode_EncodeUTF8(
+ current->f_code->co_filename);
+ name = PyUnicode_EncodeUTF8(
+ current->f_code->co_name);
+#else
+ Py_INCREF(current->f_code->co_filename);
+ filename = current->f_code->co_filename;
+ Py_INCREF(current->f_code->co_name);
+ name = current->f_code->co_name;
+#endif
+
+ if (current == (PyFrameObject *)frame) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Thread %ld executing "
+ "file \"%s\", line %d, in %s", getpid(),
+ thread_id, PyString_AsString(filename),
+ lineno, PyString_AsString(name));
+ }
+ else {
+ if (current->f_back) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): called from file "
+ "\"%s\", line %d, in %s,", getpid(),
+ PyString_AsString(filename), lineno,
+ PyString_AsString(name));
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): called from file "
+ "\"%s\", line %d, in %s.", getpid(),
+ PyString_AsString(filename), lineno,
+ PyString_AsString(name));
+ }
+ }
+
+ Py_DECREF(filename);
+ Py_DECREF(name);
+
+ current = current->f_back;
+ }
+ }
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Failed to iterate over "
+ "current frames for active threads.", getpid());
+
+ PyErr_Print();
+ PyErr_Clear();
+ }
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Failed to get current frames "
+ "for active threads.", getpid());
+
+ PyErr_Print();
+ PyErr_Clear();
+ }
+
+ Py_XDECREF(threads);
+
+ PyGILState_Release(state);
+}
+
static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
{
apr_threadattr_t *thread_attr;
@@ -11206,17 +8120,16 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
/* Start monitoring thread if required. */
wsgi_deadlock_timeout = daemon->group->deadlock_timeout;
- wsgi_inactivity_timeout = daemon->group->inactivity_timeout;
-
- if (wsgi_deadlock_timeout || wsgi_inactivity_timeout) {
- apr_thread_mutex_create(&wsgi_shutdown_lock,
- APR_THREAD_MUTEX_UNNESTED, p);
+ wsgi_idle_timeout = daemon->group->inactivity_timeout;
+ wsgi_busy_timeout = daemon->group->blocked_timeout;
+ wsgi_graceful_timeout = daemon->group->graceful_timeout;
+ if (wsgi_deadlock_timeout || wsgi_idle_timeout) {
rv = apr_thread_create(&reaper, thread_attr, wsgi_monitor_thread,
daemon, p);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create monitor "
"thread in daemon process '%s'.", getpid(),
daemon->group->name);
@@ -11225,7 +8138,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
if (wsgi_deadlock_timeout) {
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create deadlock "
"thread in daemon process '%s'.", getpid(),
daemon->group->name);
@@ -11247,7 +8160,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
daemon->group->threads * sizeof(WSGIDaemonThread));
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Starting %d threads in daemon "
"process '%s'.", getpid(), daemon->group->threads,
daemon->group->name);
@@ -11257,7 +8170,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
WSGIDaemonThread *thread = &wsgi_worker_threads[i];
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Starting thread %d in daemon "
"process '%s'.", getpid(), i+1, daemon->group->name);
}
@@ -11267,7 +8180,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
rv = apr_thread_cond_create(&thread->condition, p);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create worker "
"thread %d state condition variable in daemon "
"process '%s'.", getpid(), i, daemon->group->name);
@@ -11285,7 +8198,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create worker "
"thread %d state mutex variable in daemon "
"process '%s'.", getpid(), i, daemon->group->name);
@@ -11309,7 +8222,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
wsgi_daemon_thread, thread, p);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create worker "
"thread %d in daemon process '%s'.", getpid(),
i, daemon->group->name);
@@ -11326,17 +8239,85 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
/* Block until we get a process shutdown signal. */
- do {
+ while (1) {
+ char buf[1];
+ apr_size_t nbytes = 1;
+
rv = apr_poll(&poll_fd, 1, &poll_count, -1);
- } while (APR_STATUS_IS_EINTR(rv));
+ if (APR_STATUS_IS_EINTR(rv))
+ continue;
+
+ rv = apr_file_read(wsgi_signal_pipe_in, buf, &nbytes);
+
+ if (rv != APR_SUCCESS || nbytes != 1) {
+ ap_log_error(APLOG_MARK, APLOG_ALERT, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Failed read on signal pipe '%s'.",
+ getpid(), daemon->group->name);
+
+ break;
+ }
+
+ if (buf[0] == 'C') {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exceeded CPU time limit '%s'.",
+ getpid(), daemon->group->name);
+
+ if (!wsgi_daemon_graceful) {
+ if (wsgi_active_requests) {
+ wsgi_daemon_graceful++;
+
+ apr_thread_mutex_lock(wsgi_monitor_lock);
+ wsgi_graceful_shutdown_time = apr_time_now();
+ wsgi_graceful_shutdown_time += wsgi_graceful_timeout;
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exceeded CPU time "
+ "limit, waiting for requests to complete "
+ "'%s'.", getpid(), daemon->group->name);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exceeded CPU time "
+ "limit, triggering immediate shutdown "
+ "'%s'.", getpid(), daemon->group->name);
+
+ wsgi_daemon_shutdown++;
+ kill(getpid(), SIGINT);
+ }
+ }
+ }
+ else if (buf[0] == 'G') {
+ if (!wsgi_daemon_graceful) {
+ if (wsgi_active_requests) {
+ wsgi_daemon_graceful++;
- if (wsgi_cpu_time_limit_exceeded) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
- "mod_wsgi (pid=%d): Exceeded CPU time limit '%s'.",
- getpid(), daemon->group->name);
+ apr_thread_mutex_lock(wsgi_monitor_lock);
+ wsgi_graceful_shutdown_time = apr_time_now();
+ wsgi_graceful_shutdown_time += wsgi_graceful_timeout;
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Graceful shutdown "
+ "requested, waiting for requests to complete "
+ "'%s'.", getpid(), daemon->group->name);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Graceful shutdown "
+ "requested, triggering immediate shutdown "
+ "'%s'.", getpid(), daemon->group->name);
+
+ wsgi_daemon_shutdown++;
+ kill(getpid(), SIGINT);
+ }
+ }
+ }
+ else
+ break;
}
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Shutdown requested '%s'.",
getpid(), daemon->group->name);
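
The loop above is the read side of a self-pipe: a signal handler writes a single byte into wsgi_signal_pipe_out and the poll() here wakes up so the shutdown decision is made outside signal context. The actual wsgi_signal_handler registered further down is not shown in this hunk, so the following is only a sketch of what the write side could look like; the 'C' and 'G' byte values are taken from the code above, everything else is assumed:

static void example_signal_handler(int signum)
{
    apr_size_t nbytes = 1;

    if (signum == SIGXCPU) {
        /* CPU limit exceeded: let the main loop decide between a
         * graceful and an immediate shutdown. */
        apr_file_write(wsgi_signal_pipe_out, "C", &nbytes);
    }
    else if (signum == AP_SIG_GRACEFUL) {
        /* Graceful restart requested by Apache. */
        apr_file_write(wsgi_signal_pipe_out, "G", &nbytes);
    }
    else {
        /* SIGINT/SIGTERM: any other byte makes the loop break out. */
        wsgi_daemon_shutdown++;
        apr_file_write(wsgi_signal_pipe_out, "S", &nbytes);
    }
}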
@@ -11351,7 +8332,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
daemon, p);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create reaper "
"thread in daemon process '%s'.", getpid(),
daemon->group->name);
@@ -11359,6 +8340,15 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
}
/*
+     * If shutting down the process due to reaching the blocked
+     * requests limit, then try to dump out stack traces of any
+     * threads which are still running, as a debugging aid.
+ */
+
+ if (wsgi_dump_blocked_requests)
+ wsgi_log_stack_traces();
+
+ /*
* Attempt a graceful shutdown by waiting for any
* threads which were processing a request at the time
* of shutdown. In some respects this is a bit pointless
@@ -11375,7 +8365,7 @@ static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon)
if (wsgi_worker_threads[i].thread && wsgi_worker_threads[i].running) {
rv = apr_thread_join(&thread_rv, wsgi_worker_threads[i].thread);
if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server,
"mod_wsgi (pid=%d): Couldn't join with "
"worker thread %d in daemon process '%s'.",
getpid(), i, daemon->group->name);
@@ -11395,14 +8385,14 @@ static apr_status_t wsgi_cleanup_process(void *data)
if (group->listener_fd != -1) {
if (close(group->listener_fd) < 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno),
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno,
wsgi_server, "mod_wsgi (pid=%d): "
"Couldn't close unix domain socket '%s'.",
getpid(), group->socket);
}
if (unlink(group->socket) < 0 && errno != ENOENT) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno),
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno,
wsgi_server, "mod_wsgi (pid=%d): "
"Couldn't unlink unix domain socket '%s'.",
getpid(), group->socket);
@@ -11423,21 +8413,21 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
int i = 0;
if ((status = apr_proc_fork(&daemon->process, p)) < 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
"mod_wsgi: Couldn't spawn process '%s'.",
daemon->group->name);
return DECLINED;
}
else if (status == APR_INCHILD) {
if (!geteuid()) {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Starting process '%s' with "
"uid=%ld, gid=%u and threads=%d.", getpid(),
daemon->group->name, (long)daemon->group->uid,
(unsigned)daemon->group->gid, daemon->group->threads);
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Starting process '%s' with "
"threads=%d.", getpid(), daemon->group->name,
daemon->group->threads);
@@ -11453,7 +8443,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
status = bindprocessor(BINDPROCESS, (int)getpid(),
PROCESSOR_CLASS_ANY);
if (status != OK) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server,
"mod_wsgi (pid=%d): Failed to unbind processor.",
getpid());
}
@@ -11468,7 +8458,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
if (daemon->group->cpu_priority != 0) {
if (setpriority(PRIO_PROCESS, 0,
daemon->group->cpu_priority) == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server,
"mod_wsgi (pid=%d): Couldn't set CPU priority "
"in daemon process '%d'.", getpid(),
daemon->group->cpu_priority);
@@ -11486,7 +8476,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
daemon->group->mutex_path, p);
if (status != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server,
"mod_wsgi (pid=%d): Couldn't intialise accept "
"mutex in daemon process '%s'.",
getpid(), daemon->group->mutex_path);
@@ -11595,10 +8585,13 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
&wsgi_signal_pipe_out, p);
if (status != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_EMERG(status), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_EMERG, status, wsgi_server,
"mod_wsgi (pid=%d): Couldn't initialise signal "
"pipe in daemon process '%s'.", getpid(),
daemon->group->name);
+
+ /* Don't die immediately to avoid a fork bomb. */
+
sleep(20);
exit(-1);
@@ -11608,6 +8601,9 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
apr_signal(SIGINT, wsgi_signal_handler);
apr_signal(SIGTERM, wsgi_signal_handler);
+
+ apr_signal(AP_SIG_GRACEFUL, wsgi_signal_handler);
+
#ifdef SIGXCPU
apr_signal(SIGXCPU, wsgi_signal_handler);
#endif
@@ -11628,7 +8624,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
#endif
if (result == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server,
"mod_wsgi (pid=%d): Couldn't set CPU time "
"limit of %d seconds for process '%s'.", getpid(),
daemon->group->cpu_time_limit,
@@ -11637,9 +8633,9 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
}
/*
- * Set limits on amount of date segment memory that can
- * be used. Although this is done, some platforms
- * doesn't actually support it.
+     * Set limits on the amount of data segment memory that can
+     * be used. Although this is done, some platforms don't
+     * actually support it.
*/
if (daemon->group->memory_limit > 0) {
@@ -11655,7 +8651,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
#endif
if (result == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server,
"mod_wsgi (pid=%d): Couldn't set memory time "
"limit of %ld for process '%s'.", getpid(),
(long)daemon->group->memory_limit,
@@ -11684,7 +8680,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
#endif
if (result == -1) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server,
"mod_wsgi (pid=%d): Couldn't set virtual memory "
"time limit of %ld for process '%s'.", getpid(),
(long)daemon->group->virtual_memory_limit,
@@ -11867,7 +8863,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
if (daemon->group->server) {
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Process '%s' logging to "
"'%s'.", getpid(), daemon->group->name,
daemon->group->server->server_hostname);
@@ -11877,7 +8873,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
}
else {
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Process '%s' forced to log "
"to '%s'.", getpid(), daemon->group->name,
wsgi_server->server_hostname);
@@ -11895,10 +8891,20 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
*/
wsgi_python_initialized = 1;
+
wsgi_python_path = daemon->group->python_path;
wsgi_python_eggs = daemon->group->python_eggs;
+
+ wsgi_newrelic_config_file = daemon->group->newrelic_config_file;
+ wsgi_newrelic_environment = daemon->group->newrelic_environment;
+
wsgi_python_child_init(wsgi_daemon_pool);
+ /* Create lock for request monitoring. */
+
+ apr_thread_mutex_create(&wsgi_monitor_lock,
+ APR_THREAD_MUTEX_UNNESTED, p);
+
/*
* Create socket wrapper for listener file descriptor
* and mutex for controlling which thread gets to
@@ -11916,7 +8922,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
     * have the side effect of also destroying Python.
*/
- ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
"mod_wsgi (pid=%d): Stopping process '%s'.", getpid(),
daemon->group->name);
@@ -11995,7 +9001,7 @@ static int wsgi_start_daemons(apr_pool_t *p)
entry->uid = ap_unixd_config.user_id;
entry->user = ap_unixd_config.user_name;
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Reset default user for "
"daemon process group '%s' to uid=%ld.",
getpid(), entry->name, (long)entry->uid);
@@ -12004,7 +9010,7 @@ static int wsgi_start_daemons(apr_pool_t *p)
if (entry->gid == ap_gname2id(DEFAULT_GROUP)) {
entry->gid = ap_unixd_config.group_id;
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Reset default group for "
"daemon process group '%s' to gid=%ld.",
getpid(), entry->name, (long)entry->gid);
@@ -12053,7 +9059,7 @@ static int wsgi_start_daemons(apr_pool_t *p)
p);
if (status != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server,
"mod_wsgi (pid=%d): Couldn't create accept "
"lock '%s' (%d).", getpid(), entry->mutex_path,
wsgi_server_config->lock_mechanism);
@@ -12088,7 +9094,7 @@ static int wsgi_start_daemons(apr_pool_t *p)
buf.sem_perm.mode = 0600;
ick.buf = &buf;
if (semctl(ospmutex.crossproc, 0, IPC_SET, ick) < 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno),
+ ap_log_error(APLOG_MARK, APLOG_CRIT, errno,
wsgi_server, "mod_wsgi (pid=%d): "
"Couldn't set permissions on accept "
"mutex '%s' (sysvsem).", getpid(),
@@ -12100,7 +9106,7 @@ static int wsgi_start_daemons(apr_pool_t *p)
#if APR_HAS_FLOCK_SERIALIZE
if (!strcmp(apr_proc_mutex_name(entry->mutex), "flock")) {
if (chown(entry->mutex_path, entry->uid, -1) < 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno),
+ ap_log_error(APLOG_MARK, APLOG_CRIT, errno,
wsgi_server, "mod_wsgi (pid=%d): "
"Couldn't set permissions on accept "
"mutex '%s' (flock).", getpid(),
@@ -12155,7 +9161,7 @@ static int wsgi_connect_daemon(request_rec *r, WSGIDaemonSocket *daemon)
retries++;
if ((daemon->fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r,
"mod_wsgi (pid=%d): Unable to create socket to "
"connect to WSGI daemon process.", getpid());
@@ -12164,7 +9170,7 @@ static int wsgi_connect_daemon(request_rec *r, WSGIDaemonSocket *daemon)
if (connect(daemon->fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
if (errno == ECONNREFUSED && retries < WSGI_CONNECT_ATTEMPTS) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r,
"mod_wsgi (pid=%d): Connection attempt #%d to "
"WSGI daemon process '%s' on '%s' failed, "
"sleeping before retrying again.", getpid(),
@@ -12174,20 +9180,20 @@ static int wsgi_connect_daemon(request_rec *r, WSGIDaemonSocket *daemon)
/*
* Progressively increase time we wait between
- * connection attempts. Start at 0.1 second and
- * double each time but apply ceiling at 2.0
+ * connection attempts. Start at 0.125 second and
+ * double each time but apply ceiling at 4.0
* seconds.
*/
if (!timer)
- timer = apr_time_make(0, 100000);
+ timer = apr_time_make(0, 125000);
apr_sleep(timer);
timer = (2 * timer) % apr_time_make(2, 0);
}
else {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r,
"mod_wsgi (pid=%d): Unable to connect to "
"WSGI daemon process '%s' on '%s' after "
"multiple attempts.", getpid(), daemon->name,
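
The comment above states the intended policy: start at 0.125 seconds and double the wait on each refused connection, with a ceiling. Written out plainly with an explicit cap it might look like the sketch below; try_connect() is a hypothetical stand-in for the socket()/connect() sequence above, and this is a sketch of the stated policy rather than the exact arithmetic the patch uses:

apr_interval_time_t timer = apr_time_make(0, 125000);  /* 0.125 seconds */
apr_interval_time_t limit = apr_time_make(4, 0);       /* 4 second ceiling */

while (try_connect() != APR_SUCCESS) {
    apr_sleep(timer);

    timer = 2 * timer;

    if (timer > limit)
        timer = limit;
}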
@@ -12641,14 +9647,14 @@ static int wsgi_execute_remote(request_rec *r)
/* Send request details and subprocess environment. */
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Request server was "
"'%s|%d'.", getpid(), r->server->server_hostname,
r->server->port);
}
if ((rv = wsgi_send_request(r, config, daemon)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
"mod_wsgi (pid=%d): Unable to send request details "
"to WSGI daemon process '%s' on '%s'.", getpid(),
daemon->name, daemon->socket);
@@ -12713,7 +9719,7 @@ static int wsgi_execute_remote(request_rec *r)
*/
if (r->status != 200) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Unexpected status from "
"WSGI daemon process '%d'.", getpid(), r->status);
return HTTP_INTERNAL_SERVER_ERROR;
@@ -12723,7 +9729,7 @@ static int wsgi_execute_remote(request_rec *r)
break;
if (strcmp(r->status_line, "200 Rejected")) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Unexpected status from "
"WSGI daemon process '%d'.", getpid(), r->status);
return HTTP_INTERNAL_SERVER_ERROR;
@@ -12736,7 +9742,7 @@ static int wsgi_execute_remote(request_rec *r)
/* Has maximum number of attempts been reached. */
if (retries == maximum) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
"mod_wsgi (pid=%d): Maximum number of WSGI "
"daemon process restart connects reached '%d'.",
getpid(), maximum);
@@ -12745,7 +9751,7 @@ static int wsgi_execute_remote(request_rec *r)
retries++;
- ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
"mod_wsgi (pid=%d): Connect after WSGI daemon "
"process restart, attempt #%d.", getpid(),
retries);
@@ -12756,7 +9762,7 @@ static int wsgi_execute_remote(request_rec *r)
return status;
if ((rv = wsgi_send_request(r, config, daemon)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
"mod_wsgi (pid=%d): Unable to send request "
"details to WSGI daemon process '%s' on '%s'.",
getpid(), daemon->name, daemon->socket);
@@ -12803,7 +9809,7 @@ static int wsgi_execute_remote(request_rec *r)
APR_BLOCK_READ, HUGE_STRING_LEN);
if (rv != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
"mod_wsgi (pid=%d): Unable to get bucket brigade "
"for request.", getpid());
return HTTP_INTERNAL_SERVER_ERROR;
@@ -12873,8 +9879,10 @@ static int wsgi_execute_remote(request_rec *r)
* here for status but Apache 2.4 prohibits it now.
*/
- if (r->status == 200 && !strcmp(r->status_line, "200 Error"))
+ if (r->status == 200 && !strcmp(r->status_line, "200 Error")) {
+ r->status_line = NULL;
return HTTP_INTERNAL_SERVER_ERROR;
+ }
/*
* Look for 'Location' header and if an internal
@@ -13166,11 +10174,6 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
ap_filter_t *current = NULL;
ap_filter_t *next = NULL;
- const apr_array_header_t *head = NULL;
- const apr_table_entry_t *elts = NULL;
-
- int i = 0;
-
const char *item;
/* Don't do anything if not in daemon process. */
@@ -13299,7 +10302,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
/* Read in the request details and setup request object. */
if ((rv = wsgi_read_request(csd, r)) != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server,
"mod_wsgi (pid=%d): Unable to read WSGI request.",
getpid());
@@ -13316,7 +10319,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
magic = apr_table_get(r->subprocess_env, "mod_wsgi.magic");
if (!magic) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Request origin could not be "
"validated.", getpid());
@@ -13332,7 +10335,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
memset(key, '\0', strlen(key));
if (strcmp(magic, hash) != 0) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server,
"mod_wsgi (pid=%d): Request origin could not be "
"validated.", getpid());
@@ -13369,7 +10372,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
filename = path;
}
else {
- ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server,
"mod_wsgi (pid=%d): WSGI script '%s' not located "
"within chroot directory '%s'.", getpid(), path, root);
@@ -13390,20 +10393,37 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
r->filename = (char *)filename;
- /* Recalculate WSGI script file modification time. */
+ /* Recalculate WSGI script or handler script modification time. */
- if ((rv = apr_stat(&r->finfo, filename, APR_FINFO_NORM,
- r->pool)) != APR_SUCCESS) {
- /*
- * Don't fail at this point. Allow the lack of file to
- * be detected later when trying to load the script file.
- */
+ if (script && *script) {
+ if ((rv = apr_stat(&r->finfo, script, APR_FINFO_NORM,
+ r->pool)) != APR_SUCCESS) {
+ /*
+ * Don't fail at this point. Allow the lack of file to
+ * be detected later when trying to load the script file.
+ */
- ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(rv), wsgi_server,
- "mod_wsgi (pid=%d): Unable to stat target WSGI script "
- "'%s'.", getpid(), filename);
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server,
+ "mod_wsgi (pid=%d): Unable to stat target handler "
+ "script '%s'.", getpid(), script);
- r->finfo.mtime = 0;
+ r->finfo.mtime = 0;
+ }
+ }
+ else {
+ if ((rv = apr_stat(&r->finfo, filename, APR_FINFO_NORM,
+ r->pool)) != APR_SUCCESS) {
+ /*
+ * Don't fail at this point. Allow the lack of file to
+ * be detected later when trying to load the script file.
+ */
+
+ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server,
+ "mod_wsgi (pid=%d): Unable to stat target WSGI "
+ "script '%s'.", getpid(), filename);
+
+ r->finfo.mtime = 0;
+ }
}
/*
@@ -13432,7 +10452,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
"mod_wsgi.listener_port"));
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Server listener address '%s'.",
getpid(), key);
}
@@ -13441,7 +10461,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
key, APR_HASH_KEY_STRING);
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Server listener address '%s' was"
"%s found.", getpid(), key, addr ? "" : " not");
}
@@ -13453,7 +10473,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
ap_update_vhost_given_ip(r->connection);
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Connection server matched was "
"'%s|%d'.", getpid(), c->base_server->server_hostname,
c->base_server->port);
@@ -13469,7 +10489,7 @@ static int wsgi_hook_daemon_handler(conn_rec *c)
ap_update_vhost_from_headers(r);
if (wsgi_server_config->verbose_debugging) {
- ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
"mod_wsgi (pid=%d): Request server matched was '%s|%d'.",
getpid(), r->server->server_hostname, r->server->port);
}
@@ -13600,18 +10620,40 @@ static int wsgi_hook_init(apr_pool_t *pconf, apr_pool_t *ptemp,
apr_pool_t *plog, server_rec *s)
{
void *data = NULL;
- const char *userdata_key = "wsgi_init";
+ const char *userdata_key;
char package[128];
int status = OK;
/*
+ * No longer support using mod_python at the same time as
+     * mod_wsgi, as it was becoming too painful to hack around
+     * mod_python's broken usage of threading APIs when aligning
+ * code to the stricter API requirements of Python 3.2.
+ */
+
+ userdata_key = "python_init";
+
+ apr_pool_userdata_get(&data, userdata_key, s->process->pool);
+ if (data) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL,
+ "mod_wsgi (pid=%d): The mod_python module can "
+                     "not be used in conjunction with mod_wsgi 4.0+. "
+ "Remove the mod_python module from the Apache "
+ "configuration.", getpid());
+
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ /*
* Init function gets called twice during startup, we only
* need to actually do anything on the second time it is
     * called. This avoids unnecessarily initialising and then
* destroying Python for no reason.
*/
+ userdata_key = "wsgi_init";
+
apr_pool_userdata_get(&data, userdata_key, s->process->pool);
if (!data) {
apr_pool_userdata_set((const void *)1, userdata_key,
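
Both checks rely on userdata attached to the process pool, which persists across Apache's two configuration passes, so the module can both spot that mod_python already ran its own init and skip its own work on the first pass. The run-only-on-second-pass guard in isolation looks like the sketch below (names are hypothetical; the module's own key is "wsgi_init"):

static int example_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                               apr_pool_t *ptemp, server_rec *s)
{
    void *data = NULL;
    const char *key = "example_init";

    apr_pool_userdata_get(&data, key, s->process->pool);

    if (!data) {
        /* First configuration pass: only record that we were called. */
        apr_pool_userdata_set((const void *)1, key,
                              apr_pool_cleanup_null, s->process->pool);
        return OK;
    }

    /* Second pass: safe to do the real initialisation here. */
    return OK;
}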
@@ -13698,6 +10740,8 @@ static int wsgi_hook_init(apr_pool_t *pconf, apr_pool_t *ptemp,
static void wsgi_hook_child_init(apr_pool_t *p, server_rec *s)
{
+ int rv;
+
#if defined(MOD_WSGI_WITH_DAEMONS)
WSGIProcessGroup *entries = NULL;
WSGIProcessGroup *entry = NULL;
@@ -13718,6 +10762,11 @@ static void wsgi_hook_child_init(apr_pool_t *p, server_rec *s)
}
#endif
+ /* Create lock for request monitoring. */
+
+ apr_thread_mutex_create(&wsgi_monitor_lock,
+ APR_THREAD_MUTEX_UNNESTED, p);
+
if (wsgi_python_required) {
/*
* Initialise Python if required to be done in
@@ -13738,8 +10787,6 @@ static void wsgi_hook_child_init(apr_pool_t *p, server_rec *s)
}
}
-#if defined(MOD_WSGI_WITH_AAA_HANDLERS)
-
#include "apr_lib.h"
static char *wsgi_original_uri(request_rec *r)
@@ -14053,6 +11100,71 @@ static PyObject *Auth_environ(AuthObject *self, const char *group)
PyDict_SetItemString(vars, "REQUEST_URI", object);
Py_DECREF(object);
+ /*
+ * XXX Apparently webdav does actually do modifications to
+     * the uri and path_info attributes of the request and they
+ * could be used as part of authorisation.
+ */
+
+ if (!strcmp(r->protocol, "INCLUDED")) {
+ value = r->uri;
+#if PY_MAJOR_VERSION >= 3
+ object = PyUnicode_DecodeLatin1(value, strlen(value), NULL);
+#else
+ object = PyString_FromString(value);
+#endif
+ PyDict_SetItemString(vars, "SCRIPT_NAME", object);
+ Py_DECREF(object);
+
+ value = r->path_info ? r->path_info : "";
+#if PY_MAJOR_VERSION >= 3
+ object = PyUnicode_DecodeLatin1(value, strlen(value), NULL);
+#else
+ object = PyString_FromString(value);
+#endif
+ PyDict_SetItemString(vars, "PATH_INFO", object);
+ Py_DECREF(object);
+ }
+ else if (!r->path_info || !*r->path_info) {
+ value = r->uri;
+#if PY_MAJOR_VERSION >= 3
+ object = PyUnicode_DecodeLatin1(value, strlen(value), NULL);
+#else
+ object = PyString_FromString(value);
+#endif
+ PyDict_SetItemString(vars, "SCRIPT_NAME", object);
+ Py_DECREF(object);
+
+ value = "";
+#if PY_MAJOR_VERSION >= 3
+ object = PyUnicode_DecodeLatin1(value, strlen(value), NULL);
+#else
+ object = PyString_FromString(value);
+#endif
+ PyDict_SetItemString(vars, "PATH_INFO", object);
+ Py_DECREF(object);
+ }
+ else {
+ int path_info_start = ap_find_path_info(r->uri, r->path_info);
+ value = apr_pstrndup(r->pool, r->uri, path_info_start);
+#if PY_MAJOR_VERSION >= 3
+ object = PyUnicode_DecodeLatin1(value, strlen(value), NULL);
+#else
+ object = PyString_FromString(value);
+#endif
+ PyDict_SetItemString(vars, "SCRIPT_NAME", object);
+ Py_DECREF(object);
+
+ value = r->path_info ? r->path_info : "";
+#if PY_MAJOR_VERSION >= 3
+ object = PyUnicode_DecodeLatin1(value, strlen(value), NULL);
+#else
+ object = PyString_FromString(value);
+#endif
+ PyDict_SetItemString(vars, "PATH_INFO", object);
+ Py_DECREF(object);
+ }
+
#if PY_MAJOR_VERSION >= 3
object = PyUnicode_FromString("");
#else
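
A hypothetical worked example of the SCRIPT_NAME/PATH_INFO split performed in the final branch above:

const char *uri = "/app/report/2013";            /* r->uri */
const char *path_info = "/report/2013";          /* r->path_info */

int offset = ap_find_path_info(uri, path_info);  /* offset == 4 */

/* SCRIPT_NAME is apr_pstrndup(r->pool, uri, offset), i.e. "/app";
 * PATH_INFO is path_info itself, i.e. "/report/2013". */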
@@ -14087,7 +11199,8 @@ static PyObject *Auth_environ(AuthObject *self, const char *group)
*/
if (!wsgi_daemon_pool && self->config->pass_apache_request) {
-#if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2
+#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || \
+ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 7)
object = PyCapsule_New(self->r, 0, 0);
#else
object = PyCObject_FromVoidPtr(self->r, 0);
@@ -14101,7 +11214,6 @@ static PyObject *Auth_environ(AuthObject *self, const char *group)
* mod_ssl when in use.
*/
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https");
PyDict_SetItemString(vars, "mod_ssl.is_https", object);
Py_DECREF(object);
@@ -14109,13 +11221,10 @@ static PyObject *Auth_environ(AuthObject *self, const char *group)
object = PyObject_GetAttrString((PyObject *)self, "ssl_var_lookup");
PyDict_SetItemString(vars, "mod_ssl.var_lookup", object);
Py_DECREF(object);
-#endif
return vars;
}
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
-
static PyObject *Auth_ssl_is_https(AuthObject *self, PyObject *args)
{
APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0;
@@ -14203,13 +11312,9 @@ static PyObject *Auth_ssl_var_lookup(AuthObject *self, PyObject *args)
#endif
}
-#endif
-
static PyMethodDef Auth_methods[] = {
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
{ "ssl_is_https", (PyCFunction)Auth_ssl_is_https, METH_VARARGS, 0 },
{ "ssl_var_lookup", (PyCFunction)Auth_ssl_var_lookup, METH_VARARGS, 0 },
-#endif
{ NULL, NULL}
};
@@ -14277,7 +11382,7 @@ static authn_status wsgi_check_password(request_rec *r, const char *user,
config = wsgi_create_req_config(r->pool, r);
if (!config->auth_user_script) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d): Location of WSGI user "
"authentication script not provided.", getpid());
@@ -14295,7 +11400,7 @@ static authn_status wsgi_check_password(request_rec *r, const char *user,
interp = wsgi_acquire_interpreter(group);
if (!interp) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r,
"mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
getpid(), group);
@@ -14451,7 +11556,7 @@ static authn_status wsgi_check_password(request_rec *r, const char *user,
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Target WSGI user "
"authentication script '%s' does not provide "
"'Basic' auth provider.", getpid(), script);
@@ -14492,7 +11597,7 @@ static authn_status wsgi_get_realm_hash(request_rec *r, const char *user,
config = wsgi_create_req_config(r->pool, r);
if (!config->auth_user_script) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d): Location of WSGI user "
"authentication script not provided.", getpid());
@@ -14510,7 +11615,7 @@ static authn_status wsgi_get_realm_hash(request_rec *r, const char *user,
interp = wsgi_acquire_interpreter(group);
if (!interp) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r,
"mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
getpid(), group);
@@ -14688,7 +11793,7 @@ static authn_status wsgi_get_realm_hash(request_rec *r, const char *user,
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Target WSGI user "
"authentication script '%s' does not provide "
"'Digest' auth provider.", getpid(), script);
@@ -14734,7 +11839,7 @@ static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config,
int status = HTTP_INTERNAL_SERVER_ERROR;
if (!config->auth_group_script) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d): Location of WSGI group "
"authentication script not provided.", getpid());
@@ -14752,7 +11857,7 @@ static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config,
interp = wsgi_acquire_interpreter(group);
if (!interp) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r,
"mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
getpid(), group);
@@ -14870,7 +11975,7 @@ static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config,
latin_item = PyUnicode_AsLatin1String(item);
if (!latin_item) {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0),
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0,
r, "mod_wsgi (pid=%d): "
"Groups for user returned "
"from '%s' must be an "
@@ -14896,7 +12001,7 @@ static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config,
if (!PyString_Check(item)) {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Groups for "
"user returned from '%s' must "
"be an iterable sequence of "
@@ -14923,7 +12028,7 @@ static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config,
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Groups for user "
"returned from '%s' must be an iterable "
"sequence of byte strings.", getpid(),
@@ -14971,7 +12076,7 @@ static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config,
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Target WSGI group "
"authentication script '%s' does not provide "
"group provider.", getpid(), script);
@@ -15011,7 +12116,7 @@ static int wsgi_allow_access(request_rec *r, WSGIRequestConfig *config,
int result = 0;
if (!config->access_script) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d): Location of WSGI host "
"access script not provided.", getpid());
@@ -15029,7 +12134,7 @@ static int wsgi_allow_access(request_rec *r, WSGIRequestConfig *config,
interp = wsgi_acquire_interpreter(group);
if (!interp) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r,
"mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
getpid(), group);
@@ -15139,7 +12244,7 @@ static int wsgi_allow_access(request_rec *r, WSGIRequestConfig *config,
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Indicator of "
"host accessibility returned from '%s' "
"must a boolean or None.", getpid(),
@@ -15187,7 +12292,7 @@ static int wsgi_allow_access(request_rec *r, WSGIRequestConfig *config,
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Target WSGI host "
"access script '%s' does not provide "
"host validator.", getpid(), script);
@@ -15240,7 +12345,7 @@ static int wsgi_hook_access_checker(request_rec *r)
return OK;
if (ap_satisfies(r) != SATISFY_ANY || !ap_some_auth_required(r)) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): "
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): "
"Client denied by server configuration: '%s'.",
getpid(), r->filename);
}
@@ -15248,6 +12353,7 @@ static int wsgi_hook_access_checker(request_rec *r)
return HTTP_FORBIDDEN;
}
+#if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER)
static int wsgi_hook_check_user_id(request_rec *r)
{
WSGIRequestConfig *config;
@@ -15284,7 +12390,7 @@ static int wsgi_hook_check_user_id(request_rec *r)
interp = wsgi_acquire_interpreter(group);
if (!interp) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r,
"mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
getpid(), group);
@@ -15390,7 +12496,7 @@ static int wsgi_hook_check_user_id(request_rec *r)
ap_note_basic_auth_failure(r);
status = HTTP_UNAUTHORIZED;
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): User '%s' not "
"found in executing authentication "
"script '%s', for uri '%s'.",
@@ -15406,7 +12512,7 @@ static int wsgi_hook_check_user_id(request_rec *r)
ap_note_basic_auth_failure(r);
status = HTTP_UNAUTHORIZED;
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Password mismatch "
"for user '%s' in executing "
"authentication script '%s', for uri "
@@ -15459,7 +12565,7 @@ static int wsgi_hook_check_user_id(request_rec *r)
}
else {
Py_BEGIN_ALLOW_THREADS
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
"mod_wsgi (pid=%d): Target WSGI user "
"authentication script '%s' does not provide "
"'Basic' auth provider.", getpid(), script);
@@ -15480,6 +12586,7 @@ static int wsgi_hook_check_user_id(request_rec *r)
return status;
}
+#endif
#if defined(MOD_WSGI_WITH_AUTHZ_PROVIDER)
@@ -15501,7 +12608,7 @@ static authz_status wsgi_check_authorization(request_rec *r,
config = wsgi_create_req_config(r->pool, r);
if (!config->auth_group_script) {
- ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server,
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
"mod_wsgi (pid=%d): Location of WSGI group "
"authorization script not provided.", getpid());
@@ -15514,7 +12621,7 @@ static authz_status wsgi_check_authorization(request_rec *r,
return AUTHZ_DENIED;
if (apr_table_elts(grpstatus)->nelts == 0) {
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): "
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): "
"Authorization of user '%s' to access '%s' failed. "
"User is not a member of any groups.", getpid(),
r->user, r->uri);
@@ -15528,7 +12635,7 @@ static authz_status wsgi_check_authorization(request_rec *r,
}
}
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): "
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): "
"Authorization of user '%s' to access '%s' failed. "
"User is not a member of designated groups.", getpid(),
r->user, r->uri);
@@ -15609,7 +12716,7 @@ static int wsgi_hook_auth_checker(request_rec *r)
if (!required_group || !config->group_authoritative)
return DECLINED;
- ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): "
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): "
"Authorization of user '%s' to access '%s' failed. %s.",
getpid(), r->user, r->uri, reason ? reason : "User is not "
"a member of designated groups");
@@ -15621,8 +12728,6 @@ static int wsgi_hook_auth_checker(request_rec *r)
#endif
-#endif
-
APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *wsgi_logio_add_bytes_out;
static void ap_logio_add_bytes_out(conn_rec *c, apr_off_t bytes)
@@ -15657,7 +12762,6 @@ static void wsgi_register_hooks(apr_pool_t *p)
static const char * const n2[] = { "core.c", NULL };
-#if defined(MOD_WSGI_WITH_AAA_HANDLERS)
#if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER)
static const char * const p3[] = { "mod_auth.c", NULL };
#endif
@@ -15665,7 +12769,6 @@ static void wsgi_register_hooks(apr_pool_t *p)
static const char * const n4[] = { "mod_authz_user.c", NULL };
#endif
static const char * const n5[] = { "mod_authz_host.c", NULL };
-#endif
static const char * const p6[] = { "mod_python.c", NULL };
@@ -15685,7 +12788,6 @@ static void wsgi_register_hooks(apr_pool_t *p)
NULL, AP_FTYPE_PROTOCOL);
#endif
-#if defined(MOD_WSGI_WITH_AAA_HANDLERS)
#if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER)
ap_hook_check_user_id(wsgi_hook_check_user_id, p3, NULL, APR_HOOK_MIDDLE);
#else
@@ -15699,7 +12801,6 @@ static void wsgi_register_hooks(apr_pool_t *p)
AUTHZ_PROVIDER_VERSION, &wsgi_authz_provider);
#endif
ap_hook_access_checker(wsgi_hook_access_checker, p7, n5, APR_HOOK_MIDDLE);
-#endif
}
static const command_rec wsgi_commands[] =
@@ -15741,6 +12842,8 @@ static const command_rec wsgi_commands[] =
NULL, RSRC_CONF, "Python module search path."),
AP_INIT_TAKE1("WSGIPythonEggs", wsgi_set_python_eggs,
NULL, RSRC_CONF, "Python eggs cache directory."),
+ AP_INIT_TAKE1("WSGIPythonHashSeed", wsgi_set_python_hash_seed,
+ NULL, RSRC_CONF, "Python hash seed."),
#if defined(MOD_WSGI_WITH_DAEMONS)
AP_INIT_TAKE1("WSGIRestrictEmbedded", wsgi_set_restrict_embedded,
@@ -15785,13 +12888,10 @@ static const command_rec wsgi_commands[] =
NULL, OR_FILEINFO, "Enable/Disable support for chunked requests."),
#ifndef WIN32
-#if AP_SERVER_MAJORVERSION_NUMBER >= 2
AP_INIT_TAKE1("WSGIEnableSendfile", wsgi_set_enable_sendfile,
NULL, OR_FILEINFO, "Enable/Disable support for kernel sendfile."),
#endif
-#endif
-#if defined(MOD_WSGI_WITH_AAA_HANDLERS)
AP_INIT_RAW_ARGS("WSGIAccessScript", wsgi_set_access_script,
NULL, OR_AUTHCFG, "Location of WSGI host access script file."),
AP_INIT_RAW_ARGS("WSGIAuthUserScript", wsgi_set_auth_user_script,
@@ -15804,11 +12904,15 @@ static const command_rec wsgi_commands[] =
#endif
AP_INIT_TAKE1("WSGIGroupAuthoritative", wsgi_set_group_authoritative,
NULL, OR_AUTHCFG, "Enable/Disable as being authoritative on groups."),
-#endif
AP_INIT_RAW_ARGS("WSGIHandlerScript", wsgi_add_handler_script,
NULL, ACCESS_CONF|RSRC_CONF, "Location of WSGI handler script file."),
+ AP_INIT_TAKE1("WSGINewRelicConfigFile", wsgi_set_newrelic_config_file,
+ NULL, RSRC_CONF, "New Relic monitoring agent configuration file."),
+ AP_INIT_TAKE1("WSGINewRelicEnvironment", wsgi_set_newrelic_environment,
+ NULL, RSRC_CONF, "New Relic monitoring agent environment."),
+
{ NULL }
};
@@ -15824,4 +12928,6 @@ module AP_MODULE_DECLARE_DATA wsgi_module = {
wsgi_register_hooks /* register hooks */
};
-#endif
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_apache.c b/src/server/wsgi_apache.c
new file mode 100644
index 0000000..9dd95cc
--- /dev/null
+++ b/src/server/wsgi_apache.c
@@ -0,0 +1,161 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_apache.h"
+
+#include "wsgi_daemon.h"
+
+/* ------------------------------------------------------------------------- */
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+
+#if !AP_MODULE_MAGIC_AT_LEAST(20051115,0)
+
+void wsgi_ap_close_listeners(void)
+{
+ ap_listen_rec *lr;
+
+ for (lr = ap_listeners; lr; lr = lr->next) {
+ apr_socket_close(lr->sd);
+ lr->active = 0;
+ }
+}
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+#if (APR_MAJOR_VERSION == 0) && \
+ (APR_MINOR_VERSION == 9) && \
+ (APR_PATCH_VERSION < 5)
+
+#define apr_unix_file_cleanup wsgi_apr_unix_file_cleanup
+
+apr_status_t wsgi_apr_unix_file_cleanup(void *thefile)
+{
+ apr_file_t *file = thefile;
+
+ return apr_file_close(file);
+}
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
+
+apr_status_t wsgi_apr_os_pipe_put_ex(apr_file_t **file,
+ apr_os_file_t *thefile,
+ int register_cleanup,
+ apr_pool_t *pool)
+{
+ apr_status_t rv;
+
+ rv = apr_os_pipe_put(file, thefile, pool);
+
+ if (register_cleanup) {
+ apr_pool_cleanup_register(pool, (void *)(*file),
+ apr_unix_file_cleanup,
+ apr_pool_cleanup_null);
+ }
+
+ return rv;
+}
+
+#endif
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
+APR_DECLARE(apr_status_t) apr_conv_utf8_to_ucs2(const char *in,
+ apr_size_t *inbytes,
+ apr_wchar_t *out,
+ apr_size_t *outwords);
+
+apr_status_t wsgi_utf8_to_unicode_path(apr_wchar_t* retstr,
+ apr_size_t retlen,
+ const char* srcstr)
+{
+ /* TODO: The computations could preconvert the string to determine
+ * the true size of the retstr, but that's a memory over speed
+ * tradeoff that isn't appropriate this early in development.
+ *
+ * Allocate the maximum string length based on leading 4
+ * characters of \\?\ (allowing nearly unlimited path lengths)
+ * plus the trailing null, then transform /'s into \\'s since
+ * the \\?\ form doesn't allow '/' path separators.
+ *
+ * Note that the \\?\ form only works for local drive paths, and
+ * \\?\UNC\ is needed for UNC paths.
+ */
+ apr_size_t srcremains = strlen(srcstr) + 1;
+ apr_wchar_t *t = retstr;
+ apr_status_t rv;
+
+ /* This is correct, we don't twist the filename if it will
+ * definitely be shorter than 248 characters. It merits some
+ * performance testing to see if this has any effect, but there
+ * seem to be applications that get confused by the resulting
+ * Unicode \\?\ style file names, especially if they use argv[0]
+ * or call the Win32 API functions such as GetModuleName, etc.
+ * Not every application is prepared to handle such names.
+ *
+ * Note also this is shorter than MAX_PATH, as directory paths
+ * are actually limited to 248 characters.
+ *
+ * Note that a utf-8 name can never result in more wide chars
+ * than the original number of utf-8 narrow chars.
+ */
+ if (srcremains > 248) {
+ if (srcstr[1] == ':' && (srcstr[2] == '/' || srcstr[2] == '\\')) {
+ wcscpy (retstr, L"\\\\?\\");
+ retlen -= 4;
+ t += 4;
+ }
+ else if ((srcstr[0] == '/' || srcstr[0] == '\\')
+ && (srcstr[1] == '/' || srcstr[1] == '\\')
+ && (srcstr[2] != '?')) {
+ /* Skip the slashes */
+ srcstr += 2;
+ srcremains -= 2;
+ wcscpy (retstr, L"\\\\?\\UNC\\");
+ retlen -= 8;
+ t += 8;
+ }
+ }
+
+ if (rv = apr_conv_utf8_to_ucs2(srcstr, &srcremains, t, &retlen)) {
+ return (rv == APR_INCOMPLETE) ? APR_EINVAL : rv;
+ }
+ if (srcremains) {
+ return APR_ENAMETOOLONG;
+ }
+ for (; *t; ++t)
+ if (*t == L'/')
+ *t = L'\\';
+ return APR_SUCCESS;
+}
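+
+/*
+ * A minimal usage sketch, kept compiled out, assuming a hypothetical UTF-8
+ * path in 'filename' which may be long enough to need the \\?\ form. Note
+ * that the output length is counted in wide characters, not bytes.
+ */
+
+#if 0
+static apr_status_t wsgi_example_widen_path(const char *filename)
+{
+    apr_wchar_t wfilename[8192];
+
+    /* On success 'wfilename' holds the path with '/' mapped to '\'. */
+    return wsgi_utf8_to_unicode_path(wfilename,
+                                     sizeof(wfilename) / sizeof(apr_wchar_t),
+                                     filename);
+}
+#endif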
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_apache.h b/src/server/wsgi_apache.h
new file mode 100644
index 0000000..857f8e3
--- /dev/null
+++ b/src/server/wsgi_apache.h
@@ -0,0 +1,136 @@
+#ifndef WSGI_APACHE_H
+#define WSGI_APACHE_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Enables access to Apache private API and data structures. Need to do
+ * this to access the following:
+ *
+ * In Apache 2.X need access to ap_create_request_config().
+ *
+ * In Apache 2.X need access to core_module and core_request_config.
+ *
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#define CORE_PRIVATE 1
+
+#include "httpd.h"
+
+#if !defined(HTTPD_ROOT)
+#error Sorry, Apache developer package does not appear to be installed.
+#endif
+
+#if !defined(AP_SERVER_MAJORVERSION_NUMBER)
+#if AP_MODULE_MAGIC_AT_LEAST(20010224,0)
+#define AP_SERVER_MAJORVERSION_NUMBER 2
+#define AP_SERVER_MINORVERSION_NUMBER 0
+#define AP_SERVER_PATCHLEVEL_NUMBER 0
+#else
+#define AP_SERVER_MAJORVERSION_NUMBER 1
+#define AP_SERVER_MINORVERSION_NUMBER 3
+#define AP_SERVER_PATCHLEVEL_NUMBER 0
+#endif
+#endif
+
+#if !defined(AP_SERVER_BASEVERSION)
+#define AP_SERVER_BASEVERSION SERVER_BASEVERSION
+#endif
+
+#if AP_SERVER_MAJORVERSION_NUMBER < 2
+#error Sorry, mod_wsgi 4.0+ requires Apache 2.0+.
+#endif
+
+#include "apr_lib.h"
+#include "ap_mpm.h"
+#include "ap_compat.h"
+#include "apr_tables.h"
+#include "apr_strings.h"
+#include "http_config.h"
+#include "ap_listen.h"
+#include "apr_version.h"
+#include "apr_buckets.h"
+
+#include "apr_optional.h"
+
+APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *));
+APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, (apr_pool_t *,
+ server_rec *, conn_rec *, request_rec *, char *));
+
+#include "ap_config.h"
+#include "http_core.h"
+#include "http_log.h"
+#include "http_main.h"
+#include "http_protocol.h"
+#include "http_request.h"
+#include "util_script.h"
+#include "util_md5.h"
+
+#ifndef APR_FPROT_GWRITE
+#define APR_FPROT_GWRITE APR_GWRITE
+#endif
+#ifndef APR_FPROT_WWRITE
+#define APR_FPROT_WWRITE APR_WWRITE
+#endif
+
+#if !AP_MODULE_MAGIC_AT_LEAST(20050127,0)
+/* Debian backported ap_regex_t to Apache 2.0 and
+ * thus broke checking based on the official version. */
+#ifndef AP_REG_EXTENDED
+typedef regex_t ap_regex_t;
+typedef regmatch_t ap_regmatch_t;
+#define AP_REG_EXTENDED REG_EXTENDED
+#endif
+#endif
+
+#if !AP_MODULE_MAGIC_AT_LEAST(20081201,0)
+#define ap_unixd_config unixd_config
+#endif
+
+#if !AP_MODULE_MAGIC_AT_LEAST(20051115,0)
+extern void wsgi_ap_close_listeners(void);
+#define ap_close_listeners wsgi_ap_close_listeners
+#endif
+
+#if (APR_MAJOR_VERSION == 0) && \
+ (APR_MINOR_VERSION == 9) && \
+ (APR_PATCH_VERSION < 5)
+extern apr_status_t wsgi_apr_unix_file_cleanup(void *);
+extern apr_status_t wsgi_apr_os_pipe_put_ex(apr_file_t **, apr_os_file_t *,
+ int, apr_pool_t *);
+#define apr_unix_file_cleanup wsgi_apr_unix_file_cleanup
+#define apr_os_pipe_put_ex wsgi_apr_os_pipe_put_ex
+#endif
+
+#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
+typedef apr_uint16_t apr_wchar_t;
+extern apr_status_t wsgi_utf8_to_unicode_path(apr_wchar_t* retstr,
+ apr_size_t retlen,
+ const char* srcstr);
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_buckets.c b/src/server/wsgi_buckets.c
new file mode 100644
index 0000000..1d2f1e9
--- /dev/null
+++ b/src/server/wsgi_buckets.c
@@ -0,0 +1,174 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_buckets.h"
+
+#include "wsgi_interp.h"
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ char *base;
+ const char *application_group;
+ PyObject *string_object;
+ int decref_string;
+} wsgi_apr_bucket_python;
+
+/* ------------------------------------------------------------------------- */
+
+static void wsgi_python_bucket_destroy(void *data)
+{
+ wsgi_apr_bucket_python *h = data;
+
+ fprintf(stderr, "wsgi_python_bucket_destroy\n");
+
+ if (apr_bucket_shared_destroy(h)) {
+ if (h->decref_string) {
+ InterpreterObject *interp = NULL;
+
+ fprintf(stderr, "wsgi_apr_bucket_python_make #1\n");
+ fprintf(stderr, "application_group=%s\n", h->application_group);
+ fflush(stderr);
+
+ interp = wsgi_acquire_interpreter(h->application_group);
+ Py_DECREF(h->string_object);
+ wsgi_release_interpreter(interp);
+ }
+
+ apr_bucket_free(h);
+ }
+}
+
+/* ------------------------------------------------------------------------- */
+
+static apr_status_t wsgi_python_bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len,
+ apr_read_type_e block)
+{
+ wsgi_apr_bucket_python *h = b->data;
+
+ *str = h->base + b->start;
+ *len = b->length;
+ return APR_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static apr_bucket *wsgi_apr_bucket_python_make(apr_bucket *b,
+ const char *buf,
+ apr_size_t length,
+ const char *application_group,
+ PyObject *string_object,
+ int decref_string
+ )
+{
+ wsgi_apr_bucket_python *h;
+
+ fprintf(stderr, "wsgi_apr_bucket_python_make\n");
+ fprintf(stderr, "length=%zd\n", length);
+ fflush(stderr);
+
+ h = apr_bucket_alloc(sizeof(*h), b->list);
+
+ h->base = (char *)buf;
+ h->application_group = application_group;
+ h->string_object = string_object;
+ h->decref_string = decref_string;
+
+ b = apr_bucket_shared_make(b, h, 0, length);
+ b->type = &wsgi_apr_bucket_type_python;
+
+ return b;
+}
+
+/* ------------------------------------------------------------------------- */
+
+apr_bucket *wsgi_apr_bucket_python_create(const char *buf, apr_size_t length,
+ const char *application_group,
+ PyObject *string_object,
+ apr_bucket_alloc_t *list)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+
+ return wsgi_apr_bucket_python_make(b, buf, length, application_group,
+ string_object, 0);
+}
+
+/* ------------------------------------------------------------------------- */
+
+static apr_status_t wsgi_python_bucket_setaside(apr_bucket *b, apr_pool_t *p)
+{
+ wsgi_apr_bucket_python *h = b->data;
+
+ fprintf(stderr, "wsgi_python_bucket_setaside\n");
+ fflush(stderr);
+
+ if (h->decref_string) {
+ /*
+ * XXX Not sure if this is correct. Can't assume that if doing
+ * a set aside of a bucket which was already set aside that
+ * we aren't still in context of active interpreter.
+ */
+ InterpreterObject *interp = NULL;
+
+ fprintf(stderr, "wsgi_python_bucket_setaside #1\n");
+ fflush(stderr);
+
+ interp = wsgi_acquire_interpreter(h->application_group);
+ Py_INCREF(h->string_object);
+ wsgi_release_interpreter(interp);
+ }
+ else {
+ fprintf(stderr, "wsgi_python_bucket_setaside #2\n");
+ fflush(stderr);
+
+ Py_INCREF(h->string_object);
+ }
+
+ fprintf(stderr, "wsgi_python_bucket_setaside #3\n");
+ fprintf(stderr, "start=%lld\n", b->start);
+ fprintf(stderr, "length=%zd\n", b->length);
+ fflush(stderr);
+
+ wsgi_apr_bucket_python_make(b, (char *)h->base + b->start, b->length,
+ h->application_group, h->string_object, 1);
+
+ return APR_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+
+const apr_bucket_type_t wsgi_apr_bucket_type_python = {
+ "PYTHON", 5, APR_BUCKET_DATA,
+ wsgi_python_bucket_destroy,
+ wsgi_python_bucket_read,
+ wsgi_python_bucket_setaside,
+ apr_bucket_shared_split,
+ apr_bucket_shared_copy
+};
+
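+/* ------------------------------------------------------------------------- */
+
+/*
+ * A minimal usage sketch, kept compiled out, assuming a hypothetical caller
+ * which already holds a Python byte string 'object' along with its raw
+ * buffer 'data' and 'length', and which wants to queue that data on a
+ * brigade without copying it. The caller must keep 'object' alive for the
+ * lifetime of the bucket as the create function takes no reference of its
+ * own.
+ */
+
+#if 0
+static void wsgi_example_queue_string(request_rec *r, const char *group,
+                                      apr_bucket_brigade *bb, PyObject *object,
+                                      const char *data, apr_size_t length)
+{
+    apr_bucket *b;
+
+    b = wsgi_apr_bucket_python_create(data, length, group, object,
+                                      r->connection->bucket_alloc);
+
+    APR_BRIGADE_INSERT_TAIL(bb, b);
+}
+#endif
+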
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_buckets.h b/src/server/wsgi_buckets.h
new file mode 100644
index 0000000..07089fd
--- /dev/null
+++ b/src/server/wsgi_buckets.h
@@ -0,0 +1,40 @@
+#ifndef WSGI_BUCKETS_H
+#define WSGI_BUCKETS_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+extern const apr_bucket_type_t wsgi_apr_bucket_type_python;
+
+apr_bucket *wsgi_apr_bucket_python_create(const char *buf, apr_size_t length,
+ const char *application_group,
+ PyObject *string_object,
+ apr_bucket_alloc_t *list);
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_convert.c b/src/server/wsgi_convert.c
new file mode 100644
index 0000000..d8e211f
--- /dev/null
+++ b/src/server/wsgi_convert.c
@@ -0,0 +1,161 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_convert.h"
+
+#include "wsgi_validate.h"
+
+/* ------------------------------------------------------------------------- */
+
+PyObject *wsgi_convert_string_to_bytes(PyObject *value)
+{
+ PyObject *result = NULL;
+
+#if PY_MAJOR_VERSION >= 3
+ if (!PyUnicode_Check(value)) {
+ PyErr_Format(PyExc_TypeError, "expected unicode object, value "
+ "of type %.200s found", value->ob_type->tp_name);
+ return NULL;
+ }
+
+ result = PyUnicode_AsLatin1String(value);
+
+ if (!result) {
+ PyErr_SetString(PyExc_ValueError, "unicode object contains non "
+ "latin-1 characters");
+ return NULL;
+ }
+#else
+ if (!PyBytes_Check(value)) {
+ PyErr_Format(PyExc_TypeError, "expected byte string object, "
+ "value of type %.200s found", value->ob_type->tp_name);
+ return NULL;
+ }
+
+ Py_INCREF(value);
+ result = value;
+#endif
+
+ return result;
+}
+
+/* ------------------------------------------------------------------------- */
+
+PyObject *wsgi_convert_status_line_to_bytes(PyObject *status_line)
+{
+ PyObject *result = NULL;
+
+ result = wsgi_convert_string_to_bytes(status_line);
+
+ if (!result)
+ return NULL;
+
+ if (!wsgi_validate_status_line(result)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+
+ return result;
+}
+
+/* ------------------------------------------------------------------------- */
+
+PyObject *wsgi_convert_headers_to_bytes(PyObject *headers)
+{
+ PyObject *result = NULL;
+
+ int i;
+ int size;
+
+ if (!PyList_Check(headers)) {
+ PyErr_Format(PyExc_TypeError, "expected list object for headers, "
+ "value of type %.200s found", headers->ob_type->tp_name);
+        return NULL;
+ }
+
+ size = PyList_Size(headers);
+ result = PyList_New(size);
+
+ for (i = 0; i < size; i++) {
+ PyObject *header = NULL;
+
+ PyObject *header_name = NULL;
+ PyObject *header_value = NULL;
+
+ PyObject *header_name_as_bytes = NULL;
+ PyObject *header_value_as_bytes = NULL;
+
+ PyObject *result_tuple = NULL;
+
+ header = PyList_GetItem(headers, i);
+
+ if (!PyTuple_Check(header)) {
+ PyErr_Format(PyExc_TypeError, "list of tuple values "
+ "expected for headers, value of type %.200s found",
+ header->ob_type->tp_name);
+ Py_DECREF(result);
+            return NULL;
+ }
+
+ if (PyTuple_Size(header) != 2) {
+ PyErr_Format(PyExc_ValueError, "tuple of length 2 "
+ "expected for header, length is %d",
+ (int)PyTuple_Size(header));
+ Py_DECREF(result);
+            return NULL;
+ }
+
+ result_tuple = PyTuple_New(2);
+ PyList_SET_ITEM(result, i, result_tuple);
+
+ header_name = PyTuple_GetItem(header, 0);
+ header_value = PyTuple_GetItem(header, 1);
+
+ header_name_as_bytes = wsgi_convert_string_to_bytes(header_name);
+
+ if (!header_name_as_bytes)
+ goto failure;
+
+ PyTuple_SET_ITEM(result_tuple, 0, header_name_as_bytes);
+
+ if (!wsgi_validate_header_name(header_name_as_bytes))
+ goto failure;
+
+ header_value_as_bytes = wsgi_convert_string_to_bytes(header_value);
+
+ if (!header_value_as_bytes)
+ goto failure;
+
+ PyTuple_SET_ITEM(result_tuple, 1, header_value_as_bytes);
+
+ if (!wsgi_validate_header_value(header_value_as_bytes))
+ goto failure;
+ }
+
+ return result;
+
+failure:
+ Py_DECREF(result);
+ return NULL;
+}
+
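+/* ------------------------------------------------------------------------- */
+
+/*
+ * A minimal sketch, kept compiled out, of how the conversion helpers are
+ * meant to be chained, assuming hypothetical borrowed references
+ * 'status_line' and 'headers' taken from a WSGI start_response() call.
+ * Each helper returns a new reference, or NULL with a Python exception set.
+ */
+
+#if 0
+static int wsgi_example_convert_response(PyObject *status_line,
+                                         PyObject *headers)
+{
+    PyObject *status_as_bytes = NULL;
+    PyObject *headers_as_bytes = NULL;
+
+    status_as_bytes = wsgi_convert_status_line_to_bytes(status_line);
+
+    if (!status_as_bytes)
+        return 0;
+
+    headers_as_bytes = wsgi_convert_headers_to_bytes(headers);
+
+    if (!headers_as_bytes) {
+        Py_DECREF(status_as_bytes);
+        return 0;
+    }
+
+    /* ... use the byte string versions, then drop the references ... */
+
+    Py_DECREF(status_as_bytes);
+    Py_DECREF(headers_as_bytes);
+
+    return 1;
+}
+#endif
+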
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_convert.h b/src/server/wsgi_convert.h
new file mode 100644
index 0000000..aa7ebae
--- /dev/null
+++ b/src/server/wsgi_convert.h
@@ -0,0 +1,36 @@
+#ifndef WSGI_CONVERT_H
+#define WSGI_CONVERT_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+
+/* ------------------------------------------------------------------------- */
+
+extern PyObject *wsgi_convert_string_to_bytes(PyObject *value);
+extern PyObject *wsgi_convert_status_line_to_bytes(PyObject *status_line);
+extern PyObject *wsgi_convert_headers_to_bytes(PyObject *headers);
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_daemon.c b/src/server/wsgi_daemon.c
new file mode 100644
index 0000000..d435a4f
--- /dev/null
+++ b/src/server/wsgi_daemon.c
@@ -0,0 +1,39 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_daemon.h"
+
+/* ------------------------------------------------------------------------- */
+
+int wsgi_daemon_count = 0;
+apr_hash_t *wsgi_daemon_index = NULL;
+apr_hash_t *wsgi_daemon_listeners = NULL;
+
+WSGIDaemonProcess *wsgi_daemon_process = NULL;
+
+int volatile wsgi_request_count = 0;
+
+WSGIDaemonThread *wsgi_worker_threads = NULL;
+
+WSGIThreadStack *wsgi_worker_stack = NULL;
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_daemon.h b/src/server/wsgi_daemon.h
new file mode 100644
index 0000000..f19e4ea
--- /dev/null
+++ b/src/server/wsgi_daemon.h
@@ -0,0 +1,181 @@
+#ifndef WSGI_DAEMON_H
+#define WSGI_DAEMON_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+#ifndef WIN32
+#if APR_HAS_OTHER_CHILD && APR_HAS_THREADS && APR_HAS_FORK
+#define MOD_WSGI_WITH_DAEMONS 1
+#endif
+#endif
+
+/*
+ * Apache 2.X and UNIX specific definitions related to
+ * distinct daemon processes.
+ */
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+
+#include "unixd.h"
+#include "scoreboard.h"
+#include "mpm_common.h"
+#include "apr_proc_mutex.h"
+#include "apr_thread_cond.h"
+#include "apr_atomic.h"
+#include "http_connection.h"
+#include "apr_poll.h"
+#include "apr_signal.h"
+#include "http_vhost.h"
+
+#if APR_MAJOR_VERSION < 1
+#define apr_atomic_cas32 apr_atomic_cas
+#endif
+
+#if APR_HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#if APR_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_SEM_H
+#include <sys/sem.h>
+#endif
+
+#include <locale.h>
+#include <sys/un.h>
+
+#ifndef WSGI_LISTEN_BACKLOG
+#define WSGI_LISTEN_BACKLOG 100
+#endif
+
+#ifndef WSGI_CONNECT_ATTEMPTS
+#define WSGI_CONNECT_ATTEMPTS 15
+#endif
+
+#define WSGI_STACK_HEAD 0xffff
+#define WSGI_STACK_LAST 0xffff
+#define WSGI_STACK_TERMINATED 0x10000
+#define WSGI_STACK_NO_LISTENER 0x20000
+
+typedef struct {
+ server_rec *server;
+ long random;
+ int id;
+ const char *name;
+ const char *user;
+ uid_t uid;
+ const char *group;
+ gid_t gid;
+ const char *groups_list;
+ int groups_count;
+ gid_t *groups;
+ int processes;
+ int multiprocess;
+ int threads;
+ int umask;
+ const char *root;
+ const char *home;
+ const char *lang;
+ const char *locale;
+ const char *python_home;
+ const char *python_path;
+ const char *python_eggs;
+ int stack_size;
+ int maximum_requests;
+ int blocked_requests;
+ int shutdown_timeout;
+ apr_time_t deadlock_timeout;
+ apr_time_t inactivity_timeout;
+ apr_time_t blocked_timeout;
+ apr_time_t graceful_timeout;
+ int listen_backlog;
+ const char *display_name;
+ int send_buffer_size;
+ int recv_buffer_size;
+ const char *script_user;
+ const char *script_group;
+ int cpu_time_limit;
+ int cpu_priority;
+ rlim_t memory_limit;
+ rlim_t virtual_memory_limit;
+ const char *socket;
+ int listener_fd;
+ const char* mutex_path;
+ apr_proc_mutex_t* mutex;
+ const char *newrelic_config_file;
+ const char *newrelic_environment;
+} WSGIProcessGroup;
+
+typedef struct {
+ WSGIProcessGroup *group;
+ int instance;
+ apr_proc_t process;
+ apr_socket_t *listener;
+} WSGIDaemonProcess;
+
+typedef struct {
+ int id;
+ WSGIDaemonProcess *process;
+ apr_thread_t *thread;
+ int running;
+ int next;
+ int wakeup;
+ apr_thread_cond_t *condition;
+ apr_thread_mutex_t *mutex;
+} WSGIDaemonThread;
+
+typedef struct {
+ apr_uint32_t state;
+} WSGIThreadStack;
+
+typedef struct {
+ const char *name;
+ const char *socket;
+ int fd;
+} WSGIDaemonSocket;
+
+extern int wsgi_daemon_count;
+extern apr_hash_t *wsgi_daemon_index;
+extern apr_hash_t *wsgi_daemon_listeners;
+
+extern WSGIDaemonProcess *wsgi_daemon_process;
+
+extern int volatile wsgi_request_count;
+
+extern WSGIDaemonThread *wsgi_worker_threads;
+
+extern WSGIThreadStack *wsgi_worker_stack;
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_interp.c b/src/server/wsgi_interp.c
new file mode 100644
index 0000000..9e8f8ce
--- /dev/null
+++ b/src/server/wsgi_interp.c
@@ -0,0 +1,1983 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_interp.h"
+
+#include "wsgi_version.h"
+
+#include "wsgi_apache.h"
+#include "wsgi_server.h"
+#include "wsgi_logger.h"
+#include "wsgi_restrict.h"
+#include "wsgi_stream.h"
+#include "wsgi_metrics.h"
+#include "wsgi_daemon.h"
+
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifndef WIN32
+#include <pwd.h>
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+/* Function to restrict access to use of signal(). */
+
+static PyObject *wsgi_signal_intercept(PyObject *self, PyObject *args)
+{
+ PyObject *h = NULL;
+ int n = 0;
+
+ PyObject *m = NULL;
+
+ if (!PyArg_ParseTuple(args, "iO:signal", &n, &h))
+ return NULL;
+
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Callback registration for "
+ "signal %d ignored.", getpid(), n);
+ Py_END_ALLOW_THREADS
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_stack");
+ if (o) {
+ PyObject *log = NULL;
+ PyObject *args = NULL;
+ PyObject *result = NULL;
+ Py_INCREF(o);
+ log = newLogObject(NULL, APLOG_WARNING, NULL);
+ args = Py_BuildValue("(OOO)", Py_None, Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_XDECREF(result);
+ Py_DECREF(args);
+ Py_DECREF(log);
+ Py_DECREF(o);
+ }
+ }
+
+ Py_XDECREF(m);
+
+ Py_INCREF(h);
+
+ return h;
+}
+
+static PyMethodDef wsgi_signal_method[] = {
+ { "signal", (PyCFunction)wsgi_signal_intercept, METH_VARARGS, 0 },
+ { NULL, NULL }
+};
+
+/* Wrapper around Python interpreter instances. */
+
+const char *wsgi_python_path = NULL;
+const char *wsgi_python_eggs = NULL;
+
+#if APR_HAS_THREADS
+int wsgi_thread_count = 0;
+apr_threadkey_t *wsgi_thread_key;
+#endif
+
+PyTypeObject Interpreter_Type;
+
+InterpreterObject *newInterpreterObject(const char *name)
+{
+ PyInterpreterState *interp = NULL;
+ InterpreterObject *self = NULL;
+ PyThreadState *tstate = NULL;
+ PyThreadState *save_tstate = NULL;
+ PyObject *module = NULL;
+ PyObject *object = NULL;
+ PyObject *item = NULL;
+
+ int max_threads = 0;
+ int max_processes = 0;
+ int is_threaded = 0;
+ int is_forked = 0;
+
+ /* Create handle for interpreter and local data. */
+
+ self = PyObject_New(InterpreterObject, &Interpreter_Type);
+ if (self == NULL)
+ return NULL;
+
+ /*
+ * If interpreter not named, then we want to bind
+ * to the first Python interpreter instance created.
+ * Give this interpreter an empty string as name.
+ */
+
+ if (!name) {
+ interp = PyInterpreterState_Head();
+ while (interp->next)
+ interp = interp->next;
+
+ name = "";
+ }
+
+ /* Save away the interpreter name. */
+
+ self->name = strdup(name);
+
+ if (interp) {
+ /*
+ * Interpreter provided to us so will not be
+ * responsible for deleting it later. This will
+ * be the case for the main Python interpreter.
+ */
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Attach interpreter '%s'.",
+ getpid(), name);
+
+ self->interp = interp;
+ self->owner = 0;
+ }
+ else {
+ /*
+ * Remember active thread state so can restore
+ * it. This is actually the thread state
+ * associated with simplified GIL state API.
+ */
+
+ save_tstate = PyThreadState_Swap(NULL);
+
+ /*
+ * Create the interpreter. If creation of the
+ * interpreter fails it will restore the
+ * existing active thread state for us so don't
+ * need to worry about it in that case.
+ */
+
+ tstate = Py_NewInterpreter();
+
+ if (!tstate) {
+ PyErr_SetString(PyExc_RuntimeError, "Py_NewInterpreter() failed");
+
+ Py_DECREF(self);
+
+ return NULL;
+ }
+
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Create interpreter '%s'.",
+ getpid(), name);
+ Py_END_ALLOW_THREADS
+
+ self->interp = tstate->interp;
+ self->owner = 1;
+ }
+
+ /*
+ * Install restricted objects for STDIN and STDOUT,
+ * or log object for STDOUT as appropriate. Don't do
+ * this if not running on Win32 and we believe we
+ * are running in single process mode, otherwise
+ * it prevents use of interactive debuggers such as
+ * the 'pdb' module.
+ */
+
+ object = newLogObject(NULL, APLOG_ERR, "stderr");
+ PySys_SetObject("stderr", object);
+ Py_DECREF(object);
+
+#ifndef WIN32
+ if (wsgi_parent_pid != getpid()) {
+#endif
+ if (wsgi_server_config->restrict_stdout == 1) {
+ object = (PyObject *)newRestrictedObject("sys.stdout");
+ PySys_SetObject("stdout", object);
+ Py_DECREF(object);
+ }
+ else {
+ object = newLogObject(NULL, APLOG_ERR, "stdout");
+ PySys_SetObject("stdout", object);
+ Py_DECREF(object);
+ }
+
+ if (wsgi_server_config->restrict_stdin == 1) {
+ object = (PyObject *)newRestrictedObject("sys.stdin");
+ PySys_SetObject("stdin", object);
+ Py_DECREF(object);
+ }
+#ifndef WIN32
+ }
+#endif
+
+ /*
+ * Set sys.argv to one element list to fake out
+ * modules that look there for Python command
+ * line arguments as appropriate.
+ */
+
+ object = PyList_New(0);
+#if PY_MAJOR_VERSION >= 3
+ item = PyUnicode_FromString("mod_wsgi");
+#else
+ item = PyString_FromString("mod_wsgi");
+#endif
+ PyList_Append(object, item);
+ PySys_SetObject("argv", object);
+ Py_DECREF(item);
+ Py_DECREF(object);
+
+ /*
+ * Install intercept for signal handler registration
+ * if appropriate.
+ */
+
+ if (wsgi_server_config->restrict_signal != 0) {
+ module = PyImport_ImportModule("signal");
+ PyModule_AddObject(module, "signal", PyCFunction_New(
+ &wsgi_signal_method[0], NULL));
+ Py_DECREF(module);
+ }
+
+ /*
+ * Force loading of codecs into interpreter. This has to be
+ * done as not otherwise done in sub interpreters and if not
+ * done, code running in sub interpreters can fail on some
+ * platforms if a unicode string is added in sys.path and an
+ * import then done.
+ */
+
+ item = PyCodec_Encoder("ascii");
+ Py_XDECREF(item);
+
+ /*
+ * If running in daemon process, override as appropriate
+ * the USER, USERNAME or LOGNAME environment variables
+ * so that they match the user that the process is running
+ * as. Need to do this else we inherit the value from the
+ * Apache parent process which is likely wrong as will be
+     * root or the user that ran sudo when Apache started.
+ * Can't update these for normal Apache child processes
+ * as that would change the expected environment of other
+ * Apache modules.
+ */
+
+#ifndef WIN32
+ if (wsgi_daemon_pool) {
+ module = PyImport_ImportModule("os");
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *key = NULL;
+ PyObject *value = NULL;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "environ");
+
+ if (object) {
+ struct passwd *pwent;
+
+ pwent = getpwuid(geteuid());
+
+ if (getenv("USER")) {
+#if PY_MAJOR_VERSION >= 3
+ key = PyUnicode_FromString("USER");
+ value = PyUnicode_Decode(pwent->pw_name,
+ strlen(pwent->pw_name),
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ key = PyString_FromString("USER");
+ value = PyString_FromString(pwent->pw_name);
+#endif
+
+ PyObject_SetItem(object, key, value);
+
+ Py_DECREF(key);
+ Py_DECREF(value);
+ }
+
+ if (getenv("USERNAME")) {
+#if PY_MAJOR_VERSION >= 3
+ key = PyUnicode_FromString("USERNAME");
+ value = PyUnicode_Decode(pwent->pw_name,
+ strlen(pwent->pw_name),
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ key = PyString_FromString("USERNAME");
+ value = PyString_FromString(pwent->pw_name);
+#endif
+
+ PyObject_SetItem(object, key, value);
+
+ Py_DECREF(key);
+ Py_DECREF(value);
+ }
+
+ if (getenv("LOGNAME")) {
+#if PY_MAJOR_VERSION >= 3
+ key = PyUnicode_FromString("LOGNAME");
+ value = PyUnicode_Decode(pwent->pw_name,
+ strlen(pwent->pw_name),
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ key = PyString_FromString("LOGNAME");
+ value = PyString_FromString(pwent->pw_name);
+#endif
+
+ PyObject_SetItem(object, key, value);
+
+ Py_DECREF(key);
+ Py_DECREF(value);
+ }
+ }
+
+ Py_DECREF(module);
+ }
+ }
+#endif
+
+ /*
+ * If running in daemon process, override HOME environment
+     * variable so that it matches the home directory of the
+ * user that the process is running as. Need to do this as
+ * Apache will inherit HOME from root user or user that ran
+ * sudo and started Apache and this would be wrong. Can't
+ * update HOME for normal Apache child processes as that
+ * would change the expected environment of other Apache
+ * modules.
+ */
+
+#ifndef WIN32
+ if (wsgi_daemon_pool) {
+ module = PyImport_ImportModule("os");
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *key = NULL;
+ PyObject *value = NULL;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "environ");
+
+ if (object) {
+ struct passwd *pwent;
+
+ pwent = getpwuid(geteuid());
+#if PY_MAJOR_VERSION >= 3
+ key = PyUnicode_FromString("HOME");
+ value = PyUnicode_Decode(pwent->pw_dir, strlen(pwent->pw_dir),
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ key = PyString_FromString("HOME");
+ value = PyString_FromString(pwent->pw_dir);
+#endif
+
+ PyObject_SetItem(object, key, value);
+
+ Py_DECREF(key);
+ Py_DECREF(value);
+ }
+
+ Py_DECREF(module);
+ }
+ }
+#endif
+
+ /*
+ * Explicitly override the PYTHON_EGG_CACHE variable if it
+ * was defined by Apache configuration. For embedded processes
+ * this would have been done by using WSGIPythonEggs directive.
+ * For daemon processes the 'python-eggs' option to the
+ * WSGIDaemonProcess directive would have needed to be used.
+ */
+
+ if (!wsgi_daemon_pool)
+ wsgi_python_eggs = wsgi_server_config->python_eggs;
+
+ if (wsgi_python_eggs) {
+ module = PyImport_ImportModule("os");
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *key = NULL;
+ PyObject *value = NULL;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "environ");
+
+ if (object) {
+#if PY_MAJOR_VERSION >= 3
+ key = PyUnicode_FromString("PYTHON_EGG_CACHE");
+ value = PyUnicode_Decode(wsgi_python_eggs,
+ strlen(wsgi_python_eggs),
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ key = PyString_FromString("PYTHON_EGG_CACHE");
+ value = PyString_FromString(wsgi_python_eggs);
+#endif
+
+ PyObject_SetItem(object, key, value);
+
+ Py_DECREF(key);
+ Py_DECREF(value);
+ }
+
+ Py_DECREF(module);
+ }
+ }
+
+ /*
+ * Install user defined Python module search path. This is
+ * added using site.addsitedir() so that any Python .pth
+ * files are opened and additional directories so defined
+ * are added to default Python search path as well. This
+ * allows virtual Python environments to work. Note that
+ * site.addsitedir() adds new directories at the end of
+ * sys.path when they really need to be added in order at
+ * the start. We therefore need to do a fiddle and shift
+ * any newly added directories to the start of sys.path.
+ */
+
+ if (!wsgi_daemon_pool)
+ wsgi_python_path = wsgi_server_config->python_path;
+
+ if (wsgi_python_path) {
+ PyObject *path = NULL;
+
+ module = PyImport_ImportModule("site");
+ path = PySys_GetObject("path");
+
+ if (module && path) {
+ PyObject *dict = NULL;
+
+ PyObject *old = NULL;
+ PyObject *new = NULL;
+ PyObject *tmp = NULL;
+
+ PyObject *item = NULL;
+
+ int i = 0;
+
+ old = PyList_New(0);
+ new = PyList_New(0);
+ tmp = PyList_New(0);
+
+ for (i=0; i<PyList_Size(path); i++)
+ PyList_Append(old, PyList_GetItem(path, i));
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "addsitedir");
+
+ if (object) {
+ const char *start;
+ const char *end;
+ const char *value;
+
+ PyObject *item;
+ PyObject *args;
+
+ PyObject *result = NULL;
+
+ Py_INCREF(object);
+
+ start = wsgi_python_path;
+ end = strchr(start, DELIM);
+
+ if (end) {
+#if PY_MAJOR_VERSION >= 3
+ item = PyUnicode_Decode(start, end-start,
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ item = PyString_FromStringAndSize(start, end-start);
+#endif
+ start = end+1;
+
+ value = PyString_AsString(item);
+
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Adding '%s' to "
+ "path.", getpid(), value);
+ Py_END_ALLOW_THREADS
+
+ args = Py_BuildValue("(O)", item);
+ result = PyEval_CallObject(object, args);
+
+ if (!result) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Call to "
+ "'site.addsitedir()' failed for '%s', "
+ "stopping.", getpid(), value);
+ Py_END_ALLOW_THREADS
+ }
+
+ Py_XDECREF(result);
+ Py_DECREF(item);
+ Py_DECREF(args);
+
+ end = strchr(start, DELIM);
+
+ while (result && end) {
+#if PY_MAJOR_VERSION >= 3
+ item = PyUnicode_Decode(start, end-start,
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ item = PyString_FromStringAndSize(start, end-start);
+#endif
+ start = end+1;
+
+ value = PyString_AsString(item);
+
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Adding '%s' to "
+ "path.", getpid(), value);
+ Py_END_ALLOW_THREADS
+
+ args = Py_BuildValue("(O)", item);
+ result = PyEval_CallObject(object, args);
+
+ if (!result) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0,
+ wsgi_server, "mod_wsgi (pid=%d): "
+ "Call to 'site.addsitedir()' failed "
+ "for '%s', stopping.",
+ getpid(), value);
+ Py_END_ALLOW_THREADS
+ }
+
+ Py_XDECREF(result);
+ Py_DECREF(item);
+ Py_DECREF(args);
+
+ end = strchr(start, DELIM);
+ }
+ }
+
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Adding '%s' to "
+ "path.", getpid(), start);
+ Py_END_ALLOW_THREADS
+
+ args = Py_BuildValue("(s)", start);
+ result = PyEval_CallObject(object, args);
+
+ if (!result) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Call to "
+ "'site.addsitedir()' failed for '%s'.",
+ getpid(), start);
+ Py_END_ALLOW_THREADS
+ }
+
+ Py_XDECREF(result);
+ Py_DECREF(args);
+
+ Py_DECREF(object);
+ }
+ else {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Unable to locate "
+ "'site.addsitedir()'.", getpid());
+ Py_END_ALLOW_THREADS
+ }
+
+ for (i=0; i<PyList_Size(path); i++)
+ PyList_Append(tmp, PyList_GetItem(path, i));
+
+ for (i=0; i<PyList_Size(tmp); i++) {
+ item = PyList_GetItem(tmp, i);
+ if (!PySequence_Contains(old, item)) {
+ int index = PySequence_Index(path, item);
+ PyList_Append(new, item);
+ if (index != -1)
+ PySequence_DelItem(path, index);
+ }
+ }
+
+ PyList_SetSlice(path, 0, 0, new);
+
+ Py_DECREF(old);
+ Py_DECREF(new);
+ Py_DECREF(tmp);
+ }
+ else {
+ if (!module) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Unable to import 'site' "
+ "module.", getpid());
+ Py_END_ALLOW_THREADS
+ }
+
+ if (!path) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Lookup for 'sys.path' "
+ "failed.", getpid());
+ Py_END_ALLOW_THREADS
+ }
+ }
+
+ Py_XDECREF(module);
+ }
+
+ /*
+ * If running in daemon mode and a home directory was set then
+     * insert an empty string at the start of the Python module
+ * search path so the current working directory will be searched.
+ * This makes things similar to when using the Python interpreter
+ * on the command line. If the current working directory changes
+ * then where it looks follows, so doesn't always look in home.
+ */
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+ if (wsgi_daemon_process && wsgi_daemon_process->group->home) {
+ PyObject *path = NULL;
+
+ path = PySys_GetObject("path");
+
+        if (path) {
+            PyObject *empty;
+
+#if PY_MAJOR_VERSION >= 3
+            empty = PyUnicode_FromString("");
+#else
+            empty = PyString_FromString("");
+#endif
+
+            PyList_Insert(path, 0, empty);
+            Py_DECREF(empty);
+        }
+ }
+#endif
+
+ /*
+ * Create 'mod_wsgi' Python module. We first try and import an
+ * external Python module of the same name. The intent is
+ * that this external module would provide optional features
+ * implementable using pure Python code. Don't want to
+ * include them in the main Apache mod_wsgi package as that
+ * complicates that package and also wouldn't allow them to
+     * be released on a separate schedule. It is easier for
+     * people to replace the Python modules package with a new
+     * version than it is to replace the Apache module package.
+ */
+
+ module = PyImport_ImportModule("mod_wsgi");
+
+ if (!module) {
+ PyObject *modules = NULL;
+
+ modules = PyImport_GetModuleDict();
+ module = PyDict_GetItemString(modules, "mod_wsgi");
+
+ if (module) {
+ PyErr_Print();
+
+ PyDict_DelItemString(modules, "mod_wsgi");
+ }
+
+ PyErr_Clear();
+
+ module = PyImport_AddModule("mod_wsgi");
+
+ Py_INCREF(module);
+ }
+ else if (!*name) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Imported 'mod_wsgi'.",
+ getpid());
+ Py_END_ALLOW_THREADS
+ }
+
+ /*
+ * Add Apache module version information to the Python
+ * 'mod_wsgi' module.
+ */
+
+ PyModule_AddObject(module, "version", Py_BuildValue("(ii)",
+ MOD_WSGI_MAJORVERSION_NUMBER,
+ MOD_WSGI_MINORVERSION_NUMBER));
+
+ /* Add type object for file wrapper. */
+
+ Py_INCREF(&Stream_Type);
+ PyModule_AddObject(module, "FileWrapper", (PyObject *)&Stream_Type);
+
+ /*
+ * Add information about process group and application
+ * group to the Python 'mod_wsgi' module.
+ */
+
+#if PY_MAJOR_VERSION >= 3
+ PyModule_AddObject(module, "process_group",
+ PyUnicode_DecodeLatin1(wsgi_daemon_group,
+ strlen(wsgi_daemon_group), NULL));
+ PyModule_AddObject(module, "application_group",
+ PyUnicode_DecodeLatin1(name, strlen(name), NULL));
+#else
+ PyModule_AddObject(module, "process_group",
+ PyString_FromString(wsgi_daemon_group));
+ PyModule_AddObject(module, "application_group",
+ PyString_FromString(name));
+#endif
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+ if (wsgi_daemon_process) {
+ object = PyLong_FromLong(wsgi_daemon_process->group->processes);
+ PyModule_AddObject(module, "maximum_processes", object);
+
+ object = PyLong_FromLong(wsgi_daemon_process->group->threads);
+ PyModule_AddObject(module, "threads_per_process", object);
+ }
+ else {
+ ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded);
+ if (is_threaded != AP_MPMQ_NOT_SUPPORTED) {
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads);
+ }
+ ap_mpm_query(AP_MPMQ_IS_FORKED, &is_forked);
+ if (is_forked != AP_MPMQ_NOT_SUPPORTED) {
+ ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_processes);
+ if (max_processes == -1) {
+ ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_processes);
+ }
+ }
+
+ max_threads = (max_threads <= 0) ? 1 : max_threads;
+ max_processes = (max_processes <= 0) ? 1 : max_processes;
+
+ object = PyLong_FromLong(max_processes);
+ PyModule_AddObject(module, "maximum_processes", object);
+
+ object = PyLong_FromLong(max_threads);
+ PyModule_AddObject(module, "threads_per_process", object);
+ }
+#else
+ ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded);
+ if (is_threaded != AP_MPMQ_NOT_SUPPORTED) {
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads);
+ }
+ ap_mpm_query(AP_MPMQ_IS_FORKED, &is_forked);
+ if (is_forked != AP_MPMQ_NOT_SUPPORTED) {
+ ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_processes);
+ if (max_processes == -1) {
+ ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_processes);
+ }
+ }
+
+ max_threads = (max_threads <= 0) ? 1 : max_threads;
+ max_processes = (max_processes <= 0) ? 1 : max_processes;
+
+ object = PyLong_FromLong(max_processes);
+ PyModule_AddObject(module, "maximum_processes", object);
+
+ object = PyLong_FromLong(max_threads);
+ PyModule_AddObject(module, "threads_per_process", object);
+#endif
+
+ PyModule_AddObject(module, "thread_utilization", PyCFunction_New(
+ &wsgi_get_utilization_method[0], NULL));
+
+ Py_DECREF(module);
+
+ /*
+ * Create 'apache' Python module. If this is not a daemon
+ * process and it is the first interpreter created by
+ * Python, we first try and import an external Python module
+ * of the same name. The intent is that this external module
+ * would provide the SWIG bindings for the internal Apache
+ * APIs. Only support use of such bindings in the first
+ * interpreter created due to threading issues in SWIG
+ * generated.
+ */
+
+ module = NULL;
+
+ if (!wsgi_daemon_pool) {
+ module = PyImport_ImportModule("apache");
+
+ if (!module) {
+ PyObject *modules = NULL;
+
+ modules = PyImport_GetModuleDict();
+ module = PyDict_GetItemString(modules, "apache");
+
+ if (module) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Unable to import "
+ "'apache' extension module.", getpid());
+ Py_END_ALLOW_THREADS
+
+ PyErr_Print();
+
+ PyDict_DelItemString(modules, "apache");
+
+ module = NULL;
+ }
+
+ PyErr_Clear();
+ }
+ else {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Imported 'apache'.",
+ getpid());
+ Py_END_ALLOW_THREADS
+ }
+ }
+
+ if (!module) {
+ module = PyImport_AddModule("apache");
+
+ Py_INCREF(module);
+ }
+
+ /*
+ * Add Apache version information to the Python 'apache'
+ * module.
+ */
+
+ PyModule_AddObject(module, "version", Py_BuildValue("(ii)",
+ AP_SERVER_MAJORVERSION_NUMBER,
+ AP_SERVER_MINORVERSION_NUMBER));
+
+ Py_DECREF(module);
+
+ /*
+ * If support for New Relic monitoring is enabled then
+ * import New Relic agent module and initialise it.
+ */
+
+ if (!wsgi_daemon_pool) {
+ wsgi_newrelic_config_file = wsgi_server_config->newrelic_config_file;
+ wsgi_newrelic_environment = wsgi_server_config->newrelic_environment;
+ }
+
+ if (wsgi_newrelic_config_file) {
+ PyObject *dict = NULL;
+
+ module = PyImport_ImportModule("newrelic.agent");
+
+ if (module) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d, process='%s', application='%s'): "
+ "Imported 'newrelic.agent'.", getpid(),
+ wsgi_daemon_group , name);
+ Py_END_ALLOW_THREADS
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "initialize");
+
+ if (object) {
+ PyObject *config_file = NULL;
+ PyObject *environment = NULL;
+ PyObject *result = NULL;
+
+#if PY_MAJOR_VERSION >= 3
+ config_file = PyUnicode_Decode(wsgi_newrelic_config_file,
+ strlen(wsgi_newrelic_config_file),
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ config_file = PyString_FromString(wsgi_newrelic_config_file);
+#endif
+
+ if (wsgi_newrelic_environment) {
+#if PY_MAJOR_VERSION >= 3
+ environment = PyUnicode_Decode(wsgi_newrelic_environment,
+ strlen(wsgi_newrelic_environment),
+ Py_FileSystemDefaultEncoding,
+ "surrogateescape");
+#else
+ environment = PyString_FromString(
+ wsgi_newrelic_environment);
+#endif
+ }
+ else {
+ Py_INCREF(Py_None);
+ environment = Py_None;
+ }
+
+ result = PyObject_CallFunctionObjArgs(object, config_file,
+ environment, NULL);
+
+ if (!result) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Unable to initialise "
+ "New Relic agent with config '%s'.", getpid(),
+ wsgi_newrelic_config_file);
+ Py_END_ALLOW_THREADS
+ }
+
+ Py_DECREF(config_file);
+ Py_DECREF(environment);
+
+ Py_XDECREF(result);
+
+ Py_DECREF(object);
+ }
+
+ Py_XDECREF(module);
+ }
+ else {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Unable to import "
+ "'newrelic.agent' module.", getpid());
+ Py_END_ALLOW_THREADS
+
+ PyErr_Print();
+ PyErr_Clear();
+ }
+ }
+
+ /*
+ * Restore previous thread state. Only need to do
+ * this where had to create a new interpreter. This
+ * is basically anything except the first Python
+ * interpreter instance. We need to restore it in
+ * these cases as came into the function holding the
+ * simplified GIL state for this thread but creating
+ * the interpreter has resulted in a new thread
+ * state object being created bound to the newly
+ * created interpreter. In doing this though we want
+ * to cache the thread state object which has been
+ * created when interpreter is created. This is so
+ * it can be reused later ensuring that thread local
+ * data persists between requests.
+ */
+
+ if (self->owner) {
+#if APR_HAS_THREADS
+ int thread_id = 0;
+ int *thread_handle = NULL;
+
+ self->tstate_table = apr_hash_make(wsgi_server->process->pool);
+
+ apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key);
+
+ if (!thread_handle) {
+ thread_id = wsgi_thread_count++;
+ thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool,
+ &thread_id, sizeof(thread_id));
+ apr_threadkey_private_set(thread_handle, wsgi_thread_key);
+ }
+ else {
+ thread_id = *thread_handle;
+ }
+
+ if (wsgi_server_config->verbose_debugging) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Bind thread state for "
+ "thread %d against interpreter '%s'.", getpid(),
+ thread_id, self->name);
+ }
+
+ apr_hash_set(self->tstate_table, thread_handle,
+ sizeof(*thread_handle), tstate);
+
+ PyThreadState_Swap(save_tstate);
+#else
+ self->tstate = tstate;
+ PyThreadState_Swap(save_tstate);
+#endif
+ }
+
+ return self;
+}
+
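+/*
+ * A minimal sketch, kept compiled out, of the calling pattern relied upon
+ * elsewhere in these sources (for example wsgi_buckets.c): acquire the
+ * interpreter for an application group, perform Python calls while holding
+ * it, then release it. The acquire/release wrappers are assumed to be
+ * declared in wsgi_interp.h alongside newInterpreterObject().
+ */
+
+#if 0
+static void wsgi_example_with_interpreter(const char *application_group)
+{
+    InterpreterObject *interp = NULL;
+
+    interp = wsgi_acquire_interpreter(application_group);
+
+    if (!interp)
+        return;
+
+    /* ... run Python code against this application group ... */
+
+    wsgi_release_interpreter(interp);
+}
+#endif
+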
+static void Interpreter_dealloc(InterpreterObject *self)
+{
+ PyThreadState *tstate = NULL;
+ PyObject *exitfunc = NULL;
+ PyObject *module = NULL;
+
+ PyThreadState *tstate_enter = NULL;
+
+ /*
+ * We should always enter here with the Python GIL
+ * held and an active thread state. This should only
+ * now occur when shutting down interpreter and not
+ * when releasing interpreter as don't support
+     * recycling of interpreters within the process. Thus
+ * the thread state should be that for the main
+ * Python interpreter. Where dealing with a named
+ * sub interpreter, we need to change the thread
+ * state to that which was originally used to create
+ * that sub interpreter before doing anything.
+ */
+
+ tstate_enter = PyThreadState_Get();
+
+ if (*self->name) {
+#if APR_HAS_THREADS
+ int thread_id = 0;
+ int *thread_handle = NULL;
+
+ apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key);
+
+ if (!thread_handle) {
+ thread_id = wsgi_thread_count++;
+ thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool,
+ &thread_id, sizeof(thread_id));
+ apr_threadkey_private_set(thread_handle, wsgi_thread_key);
+ }
+ else {
+ thread_id = *thread_handle;
+ }
+
+ tstate = apr_hash_get(self->tstate_table, &thread_id,
+ sizeof(thread_id));
+
+ if (!tstate) {
+ tstate = PyThreadState_New(self->interp);
+
+ if (wsgi_server_config->verbose_debugging) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Create thread state for "
+ "thread %d against interpreter '%s'.", getpid(),
+ thread_id, self->name);
+ }
+
+ apr_hash_set(self->tstate_table, thread_handle,
+ sizeof(*thread_handle), tstate);
+ }
+#else
+ tstate = self->tstate;
+#endif
+
+ /*
+ * Swap to interpreter thread state that was used when
+ * the sub interpreter was created.
+ */
+
+ PyThreadState_Swap(tstate);
+ }
+
+ if (self->owner) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Destroy interpreter '%s'.",
+ getpid(), self->name);
+ Py_END_ALLOW_THREADS
+ }
+ else {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Cleanup interpreter '%s'.",
+ getpid(), self->name);
+ Py_END_ALLOW_THREADS
+ }
+
+ /*
+ * Because the thread state we are using was created outside
+ * of any Python code and is not the same as the Python main
+ * thread, there is no record of it within the 'threading'
+ * module. We thus need to access current thread function of
+ * the 'threading' module to force it to create a thread
+ * handle for the thread. If we do not do this, then the
+     * 'threading' module's exit function will always fail
+ * because it will not be able to find a handle for this
+ * thread.
+ */
+
+ module = PyImport_ImportModule("threading");
+
+ if (!module)
+ PyErr_Clear();
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *func = NULL;
+
+ dict = PyModule_GetDict(module);
+#if PY_MAJOR_VERSION >= 3
+ func = PyDict_GetItemString(dict, "current_thread");
+#else
+ func = PyDict_GetItemString(dict, "currentThread");
+#endif
+ if (func) {
+ PyObject *res = NULL;
+ Py_INCREF(func);
+ res = PyEval_CallObject(func, (PyObject *)NULL);
+ if (!res) {
+ PyErr_Clear();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(func);
+ }
+ }
+
+ /*
+ * In Python 2.5.1 an exit function is no longer used to
+ * shutdown and wait on non daemon threads which were created
+ * from Python code. Instead, in Py_Main() it explicitly
+ * calls 'threading._shutdown()'. Thus need to emulate this
+ * behaviour for those versions.
+ */
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *func = NULL;
+
+ dict = PyModule_GetDict(module);
+ func = PyDict_GetItemString(dict, "_shutdown");
+ if (func) {
+ PyObject *res = NULL;
+ Py_INCREF(func);
+ res = PyEval_CallObject(func, (PyObject *)NULL);
+
+ if (res == NULL) {
+ PyObject *m = NULL;
+ PyObject *result = NULL;
+
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exception occurred within "
+ "threading._shutdown().", getpid());
+ Py_END_ALLOW_THREADS
+
+ PyErr_Fetch(&type, &value, &traceback);
+ PyErr_NormalizeException(&type, &value, &traceback);
+
+ if (!value) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+
+ if (!traceback) {
+ traceback = Py_None;
+ Py_INCREF(traceback);
+ }
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_exception");
+ if (o) {
+ PyObject *log = NULL;
+ PyObject *args = NULL;
+ Py_INCREF(o);
+ log = newLogObject(NULL, APLOG_ERR, NULL);
+ args = Py_BuildValue("(OOOOO)", type, value,
+ traceback, Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_DECREF(args);
+ Py_DECREF(log);
+ Py_DECREF(o);
+ }
+ }
+
+ if (!result) {
+ /*
+ * If can't output exception and traceback then
+ * use PyErr_Print to dump out details of the
+ * exception. For SystemExit though if we do
+ * that the process will actually be terminated
+ * so can only clear the exception information
+ * and keep going.
+ */
+
+ PyErr_Restore(type, value, traceback);
+
+ if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ PyErr_Print();
+ PyErr_Clear();
+ }
+ else {
+ PyErr_Clear();
+ }
+ }
+ else {
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(traceback);
+ }
+
+ Py_XDECREF(result);
+
+ Py_XDECREF(m);
+ }
+
+ Py_XDECREF(res);
+ Py_DECREF(func);
+ }
+ }
+
+ /* Finally done with 'threading' module. */
+
+ Py_XDECREF(module);
+
+ /*
+ * Invoke exit functions by calling sys.exitfunc() for
+ * Python 2.X and atexit._run_exitfuncs() for Python 3.X.
+ * Note that in Python 3.X we can't call this on main Python
+ * interpreter as for Python 3.X it doesn't deregister
+ * functions as called, so have no choice but to rely on
+ * Py_Finalize() to do it for the main interpreter. Now
+ * that simplified GIL state API usage sorted out, this
+ * should be okay.
+ */
+
+ module = NULL;
+
+#if PY_MAJOR_VERSION >= 3
+ if (self->owner) {
+ module = PyImport_ImportModule("atexit");
+
+ if (module) {
+ PyObject *dict = NULL;
+
+ dict = PyModule_GetDict(module);
+ exitfunc = PyDict_GetItemString(dict, "_run_exitfuncs");
+ }
+ else
+ PyErr_Clear();
+ }
+#else
+ exitfunc = PySys_GetObject("exitfunc");
+#endif
+
+ if (exitfunc) {
+ PyObject *res = NULL;
+ Py_INCREF(exitfunc);
+ PySys_SetObject("exitfunc", (PyObject *)NULL);
+ res = PyEval_CallObject(exitfunc, (PyObject *)NULL);
+
+ if (res == NULL) {
+ PyObject *m = NULL;
+ PyObject *result = NULL;
+
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): SystemExit exception "
+ "raised by exit functions ignored.", getpid());
+ Py_END_ALLOW_THREADS
+ }
+ else {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exception occurred within "
+ "exit functions.", getpid());
+ Py_END_ALLOW_THREADS
+ }
+
+ PyErr_Fetch(&type, &value, &traceback);
+ PyErr_NormalizeException(&type, &value, &traceback);
+
+ if (!value) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+
+ if (!traceback) {
+ traceback = Py_None;
+ Py_INCREF(traceback);
+ }
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_exception");
+ if (o) {
+ PyObject *log = NULL;
+ PyObject *args = NULL;
+ Py_INCREF(o);
+ log = newLogObject(NULL, APLOG_ERR, NULL);
+ args = Py_BuildValue("(OOOOO)", type, value,
+ traceback, Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_DECREF(args);
+ Py_DECREF(log);
+ Py_DECREF(o);
+ }
+ }
+
+ if (!result) {
+ /*
+ * If can't output exception and traceback then
+ * use PyErr_Print to dump out details of the
+ * exception. For SystemExit though if we do
+ * that the process will actually be terminated
+ * so can only clear the exception information
+ * and keep going.
+ */
+
+ PyErr_Restore(type, value, traceback);
+
+ if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ PyErr_Print();
+ PyErr_Clear();
+ }
+ else {
+ PyErr_Clear();
+ }
+ }
+ else {
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(traceback);
+ }
+
+ Py_XDECREF(result);
+
+ Py_XDECREF(m);
+ }
+
+ Py_XDECREF(res);
+ Py_DECREF(exitfunc);
+ }
+
+ Py_XDECREF(module);
+
+ /* If we own it, we destroy it. */
+
+ if (self->owner) {
+ /*
+ * We need to destroy all the thread state objects
+ * associated with the interpreter. If there are
+ * background threads that were created then this
+ * may well cause them to crash the next time they
+ * try to run. The only saving grace is that we are
+ * trying to shut down the process.
+ */
+
+ PyThreadState *tstate_save = tstate;
+ PyThreadState *tstate_next = NULL;
+
+ PyThreadState_Swap(NULL);
+
+ tstate = tstate->interp->tstate_head;
+ while (tstate) {
+ tstate_next = tstate->next;
+ if (tstate != tstate_save) {
+ PyThreadState_Swap(tstate);
+ PyThreadState_Clear(tstate);
+ PyThreadState_Swap(NULL);
+ PyThreadState_Delete(tstate);
+ }
+ tstate = tstate_next;
+ }
+
+ tstate = tstate_save;
+
+ PyThreadState_Swap(tstate);
+
+ /* Can now destroy the interpreter. */
+
+ Py_EndInterpreter(tstate);
+
+ PyThreadState_Swap(tstate_enter);
+ }
+
+ free(self->name);
+
+ PyObject_Del(self);
+}
+
+PyTypeObject Interpreter_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "mod_wsgi.Interpreter", /*tp_name*/
+ sizeof(InterpreterObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)Interpreter_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT, /*tp_flags*/
+ 0, /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ 0, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ 0, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+};
+
+/*
+ * Startup and shutdown of the Python interpreter. In mod_wsgi,
+ * if the Python interpreter hasn't been initialised by another
+ * Apache module such as mod_python, we will take control and
+ * initialise it. We need to remember that we initialised Python,
+ * and whether it was done in the parent or child process, as when
+ * done in the parent we also take responsibility for performing
+ * special Python fixups after Apache is forked and the child
+ * process has run.
+ *
+ * Note that by default we now defer initialisation of Python
+ * until after the fork of processes, as Python 3.X by design
+ * doesn't clean up properly when it is destroyed, causing
+ * significant memory leaks into the Apache parent process on an
+ * Apache restart. Some Python 2.X versions also have real
+ * memory leaks, but not nearly as much. The result of deferring
+ * initialisation is that we can't benefit from copy on write
+ * semantics for loaded data across a fork. Each process will
+ * therefore have a higher memory requirement where Python needs
+ * to be used.
+ */
+
+int wsgi_python_initialized = 0;
+
+#if defined(MOD_WSGI_DISABLE_EMBEDDED)
+int wsgi_python_required = 0;
+#else
+int wsgi_python_required = -1;
+#endif
+
+int wsgi_python_after_fork = 1;
+
+void wsgi_python_version(void)
+{
+ const char *compile = PY_VERSION;
+ const char *dynamic = 0;
+
+ dynamic = strtok((char *)Py_GetVersion(), " ");
+
+ if (strcmp(compile, dynamic) != 0) {
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, wsgi_server,
+ "mod_wsgi: Compiled for Python/%s.", compile);
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, wsgi_server,
+ "mod_wsgi: Runtime using Python/%s.", dynamic);
+ }
+}
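
The check above warns when the Python version mod_wsgi was compiled against differs from the shared library picked up at run time. A standalone sketch of the same comparison (illustrative only, not part of this commit; compile against the Python development headers):

    #include <stdio.h>
    #include <string.h>

    #include <Python.h>

    int main(void)
    {
        char runtime[64];

        /* Py_GetVersion() returns e.g. "2.7.5 (default, ...)"; keep only
         * the leading version number for comparison with PY_VERSION. */
        strncpy(runtime, Py_GetVersion(), sizeof(runtime) - 1);
        runtime[sizeof(runtime) - 1] = '\0';
        strtok(runtime, " ");

        if (strcmp(PY_VERSION, runtime) != 0)
            printf("Compiled for Python/%s, running Python/%s\n",
                   PY_VERSION, runtime);
        else
            printf("Python/%s\n", runtime);

        return 0;
    }
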
+
+apr_status_t wsgi_python_term(void)
+{
+ PyObject *module = NULL;
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Terminating Python.", getpid());
+
+ /*
+ * We should be executing in the main thread again at this
+ * point, but without the GIL, so simply restore the original
+ * thread state for that thread, which we remembered when we
+ * initialised the interpreter.
+ */
+
+ PyEval_AcquireThread(wsgi_main_tstate);
+
+ /*
+ * Work around a bug in Python 3.X whereby it will crash if
+ * atexit is imported into a sub interpreter but was never
+ * imported into the main interpreter before Py_Finalize()
+ * is called. We import the atexit module here, which as a
+ * side effect performs the required initialisation.
+ */
+
+ module = PyImport_ImportModule("atexit");
+ Py_XDECREF(module);
+
+ /*
+ * In Python 2.6.5 and Python 3.1.2 the shutdown of
+ * threading was moved back into Py_Finalize() for the main
+ * Python interpreter. Because we are shutting down
+ * threading ourselves, the second call results in errors
+ * being logged when Py_Finalize() is called and the
+ * shutdown function is called a second time. The errors
+ * don't indicate any real problem and the threading module
+ * ignores them anyway. Whether we are using Python with
+ * this changed behaviour can only be checked by looking at
+ * the run time version. Rather than try to add a dynamic
+ * check, create a fake 'dummy_threading' module, as the
+ * presence of that shuts up the messages. It doesn't matter
+ * that the rest of the shutdown function still runs, as
+ * everything is already stopped so it doesn't do anything.
+ */
+
+ if (!PyImport_AddModule("dummy_threading"))
+ PyErr_Clear();
+
+ /* Shutdown Python interpreter completely. */
+
+ Py_Finalize();
+
+ wsgi_python_initialized = 0;
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Python has shutdown.", getpid());
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t wsgi_python_parent_cleanup(void *data)
+{
+ if (wsgi_parent_pid == getpid()) {
+ /*
+ * Destroy Python itself including the main
+ * interpreter. If mod_python is being loaded it
+ * is left to mod_python to destroy Python,
+ * although it currently doesn't do so.
+ */
+
+ if (wsgi_python_initialized)
+ wsgi_python_term();
+ }
+
+ return APR_SUCCESS;
+}
+
+
+void wsgi_python_init(apr_pool_t *p)
+{
+ const char *python_home = 0;
+
+ /* Perform initialisation if required. */
+
+ if (!Py_IsInitialized()) {
+
+ /* Enable Python 3.0 migration warnings. */
+
+#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6
+ if (wsgi_server_config->py3k_warning_flag == 1)
+ Py_Py3kWarningFlag++;
+#endif
+
+ /* Disable writing of byte code files. */
+
+#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6
+ if (wsgi_server_config->dont_write_bytecode == 1)
+ Py_DontWriteBytecodeFlag++;
+#endif
+
+ /* Check for Python paths and optimisation flag. */
+
+ if (wsgi_server_config->python_optimize > 0)
+ Py_OptimizeFlag = wsgi_server_config->python_optimize;
+ else
+ Py_OptimizeFlag = 0;
+
+ /* Check for control options for Python warnings. */
+
+ if (wsgi_server_config->python_warnings) {
+ apr_array_header_t *options = NULL;
+ char **entries;
+
+ int i;
+
+ options = wsgi_server_config->python_warnings;
+ entries = (char **)options->elts;
+
+ for (i = 0; i < options->nelts; ++i) {
+#if PY_MAJOR_VERSION >= 3
+ wchar_t *s = NULL;
+ int len = strlen(entries[i])+1;
+
+ s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t));
+
+#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
+ wsgi_utf8_to_unicode_path(s, len, entries[i]);
+#else
+ mbstowcs(s, entries[i], len);
+#endif
+ PySys_AddWarnOption(s);
+#else
+ PySys_AddWarnOption(entries[i]);
+#endif
+ }
+ }
+
+ /* Check for Python HOME being overridden. */
+
+ python_home = wsgi_server_config->python_home;
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+ if (wsgi_daemon_process && wsgi_daemon_process->group->python_home)
+ python_home = wsgi_daemon_process->group->python_home;
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ if (python_home) {
+ wchar_t *s = NULL;
+ int len = strlen(python_home)+1;
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Python home %s.", getpid(),
+ python_home);
+
+ s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t));
+
+#if defined(WIN32) && defined(APR_HAS_UNICODE_FS)
+ wsgi_utf8_to_unicode_path(s, len, python_home);
+#else
+ mbstowcs(s, python_home, len);
+#endif
+ Py_SetPythonHome(s);
+ }
+#else
+ if (python_home) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Python home %s.", getpid(),
+ python_home);
+
+ Py_SetPythonHome((char *)python_home);
+ }
+#endif
+
+ /*
+ * Set environment variable PYTHONHASHSEED. We need to
+ * make sure we remove the environment variable later
+ * so that it doesn't remain in the process environment
+ * and get inherited by exec'd sub processes.
+ */
+
+ if (wsgi_server_config->python_hash_seed != NULL) {
+ char *envvar = apr_pstrcat(p, "PYTHONHASHSEED=",
+ wsgi_server_config->python_hash_seed, NULL);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Setting hash seed to %s.",
+ getpid(), wsgi_server_config->python_hash_seed);
+ putenv(envvar);
+ }
+
+ /*
+ * Work around a bug in Python 3.1 where it will crash
+ * when used in a non console application on Windows if
+ * stdin/stdout have been initialised and aren't null.
+ * This is supposed to be fixed in Python 3.3.
+ */
+
+#if defined(WIN32) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 3
+ _wputenv(L"PYTHONIOENCODING=cp1252:backslashreplace");
+#endif
+
+ /* Initialise Python. */
+
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Initializing Python.", getpid());
+
+ Py_Initialize();
+
+ /* Initialise threading. */
+
+ PyEval_InitThreads();
+
+ /*
+ * Remove the environment variable we set for the hash
+ * seed. This has to be done in os.environ, which will
+ * in turn remove it from process environ. This should
+ * only be necessary for the main interpreter. We need
+ * to do this before we release the GIL.
+ */
+
+ if (wsgi_server_config->python_hash_seed != NULL) {
+ PyObject *module = NULL;
+
+ module = PyImport_ImportModule("os");
+
+ if (module) {
+ PyObject *dict = NULL;
+ PyObject *object = NULL;
+ PyObject *key = NULL;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "environ");
+
+ if (object) {
+#if PY_MAJOR_VERSION >= 3
+ key = PyUnicode_FromString("PYTHONHASHSEED");
+#else
+ key = PyString_FromString("PYTHONHASHSEED");
+#endif
+
+ PyObject_DelItem(object, key);
+
+ Py_DECREF(key);
+ }
+
+ Py_DECREF(module);
+ }
+ }
+
+ /*
+ * We now want to release the GIL. Before we do that
+ * though, we remember what the current thread state is.
+ * We will use that later to restore the main thread
+ * state when we want to clean up interpreters on
+ * shutdown.
+ */
+
+ wsgi_main_tstate = PyThreadState_Get();
+ PyEval_ReleaseThread(wsgi_main_tstate);
+
+ wsgi_python_initialized = 1;
+
+ /*
+ * Register cleanups to be performed on parent restart
+ * or shutdown. This will destroy Python itself.
+ */
+
+ apr_pool_cleanup_register(p, NULL, wsgi_python_parent_cleanup,
+ apr_pool_cleanup_null);
+ }
+}
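
As a rough standalone illustration of the ordering described above (not part of this commit; the seed value is hypothetical): PYTHONHASHSEED must be in the process environment before Py_Initialize() so the interpreter sees it, and is then scrubbed from os.environ so exec'd sub processes don't inherit it.

    #include <stdlib.h>

    #include <Python.h>

    int main(void)
    {
        /* The seed value "random" is only an illustration. */
        putenv("PYTHONHASHSEED=random");

        Py_Initialize();

        /* Scrub the variable from os.environ so it is not inherited
         * by any sub processes that are later exec'd. */
        PyRun_SimpleString("import os\n"
                           "os.environ.pop('PYTHONHASHSEED', None)\n");

        Py_Finalize();

        return 0;
    }
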
+
+/*
+ * Functions for acquiring and subsequently releasing the desired
+ * Python interpreter instance. When acquiring the interpreter,
+ * a new interpreter instance will be created on demand if it
+ * is required. The Python GIL will be held on return when the
+ * interpreter has been acquired.
+ */
+
+#if APR_HAS_THREADS
+apr_thread_mutex_t* wsgi_interp_lock = NULL;
+#endif
+
+PyObject *wsgi_interpreters = NULL;
+
+InterpreterObject *wsgi_acquire_interpreter(const char *name)
+{
+ PyThreadState *tstate = NULL;
+ PyInterpreterState *interp = NULL;
+ InterpreterObject *handle = NULL;
+
+ PyGILState_STATE state;
+
+ /*
+ * In a multithreaded MPM we must protect the
+ * interpreters table. This lock is only needed to
+ * avoid a secondary thread coming in and creating
+ * the same interpreter if Python releases the GIL
+ * when an interpreter is being created.
+ */
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_lock(wsgi_interp_lock);
+#endif
+
+ /*
+ * This function should never be called when the
+ * Python GIL is held, so we need to acquire it.
+ * Even though we may need to work with a sub
+ * interpreter, we need to acquire the GIL against
+ * the main interpreter first to work with the
+ * interpreter dictionary.
+ */
+
+ state = PyGILState_Ensure();
+
+ /*
+ * Check whether we already have an interpreter
+ * instance and if not, create one.
+ */
+
+ handle = (InterpreterObject *)PyDict_GetItemString(wsgi_interpreters,
+ name);
+
+ if (!handle) {
+ handle = newInterpreterObject(name);
+
+ if (!handle) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Cannot create interpreter '%s'.",
+ getpid(), name);
+
+ PyErr_Print();
+ PyErr_Clear();
+
+ PyGILState_Release(state);
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(wsgi_interp_lock);
+#endif
+ return NULL;
+ }
+
+ PyDict_SetItemString(wsgi_interpreters, name, (PyObject *)handle);
+ }
+ else
+ Py_INCREF(handle);
+
+ interp = handle->interp;
+
+ /*
+ * Create a new thread state object. We should only be
+ * getting called where there is no currently active
+ * thread state, so no need to remember the old one.
+ * When working with the main Python interpreter, always
+ * use the simplified API for GIL locking so any
+ * extension modules which use that will still work.
+ */
+
+ PyGILState_Release(state);
+
+#if APR_HAS_THREADS
+ apr_thread_mutex_unlock(wsgi_interp_lock);
+#endif
+
+ if (*name) {
+#if APR_HAS_THREADS
+ int thread_id = 0;
+ int *thread_handle = NULL;
+
+ apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key);
+
+ if (!thread_handle) {
+ thread_id = wsgi_thread_count++;
+ thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool,
+ &thread_id, sizeof(thread_id));
+ apr_threadkey_private_set(thread_handle, wsgi_thread_key);
+ }
+ else {
+ thread_id = *thread_handle;
+ }
+
+ tstate = apr_hash_get(handle->tstate_table, &thread_id,
+ sizeof(thread_id));
+
+ if (!tstate) {
+ tstate = PyThreadState_New(interp);
+
+ if (wsgi_server_config->verbose_debugging) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Create thread state for "
+ "thread %d against interpreter '%s'.", getpid(),
+ thread_id, handle->name);
+ }
+
+ apr_hash_set(handle->tstate_table, thread_handle,
+ sizeof(*thread_handle), tstate);
+ }
+#else
+ tstate = handle->tstate;
+#endif
+
+ PyEval_AcquireThread(tstate);
+ }
+ else {
+ PyGILState_Ensure();
+
+ /*
+ * When the simplified GIL state API is used, the thread
+ * local data only persists for the extent of the top
+ * level matching ensure/release calls. We want to
+ * extend the lifetime of the thread local data beyond
+ * that, retaining it for all requests within the one
+ * thread for the life of the process. To do that we
+ * need to artificially increment the reference count
+ * for the associated thread state object.
+ */
+
+ tstate = PyThreadState_Get();
+ if (tstate && tstate->gilstate_counter == 1)
+ tstate->gilstate_counter++;
+ }
+
+ return handle;
+}
+
+void wsgi_release_interpreter(InterpreterObject *handle)
+{
+ PyThreadState *tstate = NULL;
+
+ PyGILState_STATE state;
+
+ /*
+ * We need to release and destroy the thread state
+ * that was created against the interpreter. This
+ * will release the GIL. Note that it should be safe
+ * to assume that the simplified GIL state API lock
+ * was originally unlocked, as we are always calling
+ * in from an Apache thread when we acquire the
+ * interpreter in the first place.
+ */
+
+ if (*handle->name) {
+ tstate = PyThreadState_Get();
+ PyEval_ReleaseThread(tstate);
+ }
+ else
+ PyGILState_Release(PyGILState_UNLOCKED);
+
+ /*
+ * We need to reacquire the Python GIL just so we can
+ * decrement our reference count to the interpreter
+ * itself. If the interpreter has since been removed
+ * from the table of interpreters, this will result
+ * in its destruction if it is the last reference.
+ */
+
+ state = PyGILState_Ensure();
+
+ Py_DECREF(handle);
+
+ PyGILState_Release(state);
+}
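
To make the calling convention of this pair of functions concrete, here is a minimal usage sketch (illustrative only, not part of this commit): the interpreter name and the Python snippet executed are made up, and the fragment assumes the mod_wsgi internal headers.

    #include "wsgi_interp.h"

    static void run_in_interpreter(void)
    {
        InterpreterObject *interp = NULL;

        /* Creates the named sub interpreter on demand and returns
         * with the GIL held against it. */
        interp = wsgi_acquire_interpreter("example.com|/app");

        if (!interp)
            return;

        /* Safe to use the Python C API here. */
        PyRun_SimpleString("import sys");

        /* Releases the thread state and the GIL. */
        wsgi_release_interpreter(interp);
    }
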
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_interp.h b/src/server/wsgi_interp.h
new file mode 100644
index 0000000..979c2b9
--- /dev/null
+++ b/src/server/wsgi_interp.h
@@ -0,0 +1,78 @@
+#ifndef WSGI_INTERP_H
+#define WSGI_INTERP_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct {
+ PyObject_HEAD
+ char *name;
+ PyInterpreterState *interp;
+ int owner;
+#if APR_HAS_THREADS
+ apr_hash_t *tstate_table;
+#else
+ PyThreadState *tstate;
+#endif
+} InterpreterObject;
+
+extern PyTypeObject Interpreter_Type;
+
+extern InterpreterObject *newInterpreterObject(const char *name);
+
+extern int wsgi_python_initialized;
+extern int wsgi_python_after_fork;
+
+#ifndef MOD_WSGI_DISABLE_EMBEDDED
+extern int wsgi_python_required;
+#endif
+
+extern const char *wsgi_python_path;
+extern const char *wsgi_python_eggs;
+
+#if APR_HAS_THREADS
+extern int wsgi_thread_count;
+extern apr_threadkey_t *wsgi_thread_key;
+#endif
+
+extern PyObject *wsgi_interpreters;
+
+#if APR_HAS_THREADS
+extern apr_thread_mutex_t *wsgi_interp_lock;
+#endif
+
+extern void wsgi_python_version(void);
+
+extern void wsgi_python_init(apr_pool_t *p);
+extern apr_status_t wsgi_python_term(void);
+
+extern InterpreterObject *wsgi_acquire_interpreter(const char *name);
+extern void wsgi_release_interpreter(InterpreterObject *handle);
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_logger.c b/src/server/wsgi_logger.c
new file mode 100644
index 0000000..501a024
--- /dev/null
+++ b/src/server/wsgi_logger.c
@@ -0,0 +1,689 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_logger.h"
+
+#include "wsgi_server.h"
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct {
+ PyObject_HEAD
+ const char *target;
+ request_rec *r;
+ int level;
+ char *s;
+ int l;
+ int expired;
+#if PY_MAJOR_VERSION < 3
+ int softspace;
+#endif
+} LogObject;
+
+PyTypeObject Log_Type;
+
+PyObject *newLogObject(request_rec *r, int level, const char *target)
+{
+ LogObject *self;
+
+#if PY_MAJOR_VERSION >= 3
+ PyObject *module = NULL;
+ PyObject *dict = NULL;
+ PyObject *object = NULL;
+ PyObject *args = NULL;
+ PyObject *result = NULL;
+
+ module = PyImport_ImportModule("io");
+
+ if (!module)
+ return NULL;
+
+ dict = PyModule_GetDict(module);
+ object = PyDict_GetItemString(dict, "TextIOWrapper");
+
+ if (!object) {
+ PyErr_SetString(PyExc_NameError,
+ "name 'TextIOWrapper' is not defined");
+ return NULL;
+ }
+#endif
+
+ self = PyObject_New(LogObject, &Log_Type);
+ if (self == NULL)
+ return NULL;
+
+ self->target = target;
+ self->r = r;
+ self->level = APLOG_NOERRNO|level;
+ self->s = NULL;
+ self->l = 0;
+ self->expired = 0;
+#if PY_MAJOR_VERSION < 3
+ self->softspace = 0;
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ Py_INCREF(object);
+ args = Py_BuildValue("(OssOO)", self, "utf-8", "replace",
+ Py_None, Py_True);
+ Py_DECREF(self);
+ result = PyEval_CallObject(object, args);
+ Py_DECREF(args);
+ Py_DECREF(object);
+
+ return result;
+#else
+ return (PyObject *)self;
+#endif
+}
+
+#if 0
+static void Log_file(LogObject *self, const char *s, int l)
+{
+ /*
+ * XXX This function is not currently being used.
+ * The intention was that it be called instead of
+ * Log_call() when 'target' is non zero. This would
+ * be the case for 'stdout' and 'stderr'. Doing
+ * this bypasses the normal Apache logging mechanisms
+ * though. We may reawaken this code in mod_wsgi 4.0
+ * by way of a mechanism to divert logging from a
+ * daemon process to a specific log file or pipe
+ * using an option to WSGIDaemonProcess.
+ */
+
+ char errstr[MAX_STRING_LEN];
+
+ int plen = 0;
+ int slen = 0;
+
+ apr_file_t *logf = NULL;
+
+ if (self->r)
+ logf = self->r->server->error_log;
+ else
+ logf = wsgi_server->error_log;
+
+ errstr[0] = '[';
+ ap_recent_ctime(errstr + 1, apr_time_now());
+ errstr[1 + APR_CTIME_LEN - 1] = ']';
+ errstr[1 + APR_CTIME_LEN ] = ' ';
+ plen = 1 + APR_CTIME_LEN + 1;
+
+ if (self->target) {
+ int len;
+
+ errstr[plen++] = '[';
+
+ len = strlen(self->target);
+ memcpy(errstr+plen, self->target, len);
+
+ plen += len;
+
+ errstr[plen++] = ']';
+ errstr[plen++] = ' ';
+ }
+
+ slen = MAX_STRING_LEN - plen - 1;
+
+ Py_BEGIN_ALLOW_THREADS
+
+ /*
+ * We actually break long lines up into segments
+ * of around 8192 characters, with the date/time
+ * and target information prefixing each line.
+ * This is just to avoid having to allocate more
+ * memory to format the line with the prefix.
+ * We want to avoid writing the prefix separately,
+ * so we at least try to write each line in one
+ * atomic operation.
+ */
+
+ while (1) {
+ if (l > slen) {
+ memcpy(errstr+plen, s, slen);
+ errstr[plen+slen] = '\n';
+ apr_file_write_full(logf, errstr, plen+slen+1, NULL);
+ apr_file_flush(logf);
+ s += slen;
+ l -= slen;
+ }
+ else {
+ memcpy(errstr+plen, s, l);
+ errstr[plen+l] = '\n';
+ apr_file_write_full(logf, errstr, plen+l+1, NULL);
+ apr_file_flush(logf);
+ break;
+ }
+ }
+
+ Py_END_ALLOW_THREADS
+}
+#endif
+
+static void Log_call(LogObject *self, const char *s, int l)
+{
+ /*
+ * The length of the string to be logged is ignored
+ * for now. We just pass the whole string to the
+ * Apache error log functions. They will actually
+ * truncate it at some value less than 8192
+ * characters, depending on the length of the prefix
+ * that goes at the front. If there are embedded NULLs
+ * then truncation will occur at that point.
+ * Truncation like this is also what happens when
+ * using FASTCGI solutions for Apache, so we are not
+ * doing anything different here.
+ */
+
+ if (self->r) {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_rerror(APLOG_MARK, self->level, 0, self->r, "%s", s);
+ Py_END_ALLOW_THREADS
+ }
+ else {
+ Py_BEGIN_ALLOW_THREADS
+ ap_log_error(APLOG_MARK, self->level, 0, wsgi_server, "%s", s);
+ Py_END_ALLOW_THREADS
+ }
+}
+
+static void Log_dealloc(LogObject *self)
+{
+ if (self->s) {
+ if (!self->expired)
+ Log_call(self, self->s, self->l);
+
+ free(self->s);
+ }
+
+ PyObject_Del(self);
+}
+
+static PyObject *Log_flush(LogObject *self, PyObject *args)
+{
+ if (self->expired) {
+ PyErr_SetString(PyExc_RuntimeError, "log object has expired");
+ return NULL;
+ }
+
+ if (self->s) {
+ Log_call(self, self->s, self->l);
+
+ free(self->s);
+ self->s = NULL;
+ self->l = 0;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *Log_close(LogObject *self, PyObject *args)
+{
+ PyObject *result = NULL;
+
+ if (!self->expired)
+ result = Log_flush(self, args);
+
+ Py_XDECREF(result);
+
+ self->r = NULL;
+ self->expired = 1;
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *Log_isatty(LogObject *self, PyObject *args)
+{
+ Py_INCREF(Py_False);
+ return Py_False;
+}
+
+static void Log_queue(LogObject *self, const char *msg, int len)
+{
+ const char *p = NULL;
+ const char *q = NULL;
+ const char *e = NULL;
+
+ p = msg;
+ e = p + len;
+
+ /*
+ * Break the string on newlines, on the assumption
+ * that primarily textual information is being logged.
+ */
+
+ q = p;
+ while (q != e) {
+ if (*q == '\n')
+ break;
+ q++;
+ }
+
+ while (q != e) {
+ /* Output each complete line. */
+
+ if (self->s) {
+ /* Need to join with buffered value. */
+
+ int m = 0;
+ int n = 0;
+ char *s = NULL;
+
+ m = self->l;
+ n = m+q-p+1;
+
+ s = (char *)malloc(n);
+ memcpy(s, self->s, m);
+ memcpy(s+m, p, q-p);
+ s[n-1] = '\0';
+
+ free(self->s);
+ self->s = NULL;
+ self->l = 0;
+
+ Log_call(self, s, n-1);
+
+ free(s);
+ }
+ else {
+ int n = 0;
+ char *s = NULL;
+
+ n = q-p+1;
+
+ s = (char *)malloc(n);
+ memcpy(s, p, q-p);
+ s[n-1] = '\0';
+
+ Log_call(self, s, n-1);
+
+ free(s);
+ }
+
+ p = q+1;
+
+ /* Break string on newline. */
+
+ q = p;
+ while (q != e) {
+ if (*q == '\n')
+ break;
+ q++;
+ }
+ }
+
+ if (p != e) {
+ /* Save away incomplete line. */
+
+ if (self->s) {
+ /* Need to join with buffered value. */
+
+ int m = 0;
+ int n = 0;
+
+ m = self->l;
+ n = m+e-p+1;
+
+ self->s = (char *)realloc(self->s, n);
+ memcpy(self->s+m, p, e-p);
+ self->s[n-1] = '\0';
+ self->l = n-1;
+ }
+ else {
+ int n = 0;
+
+ n = e-p+1;
+
+ self->s = (char *)malloc(n);
+ memcpy(self->s, p, n-1);
+ self->s[n-1] = '\0';
+ self->l = n-1;
+ }
+ }
+}
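
The buffering rule implemented by Log_queue() can be summarised with a small standalone model (a simplification for illustration only, not the code above): complete lines are emitted immediately, while a trailing partial line is held back until more data arrives or the log object is flushed.

    #include <stdio.h>
    #include <string.h>

    static char pending[256] = "";

    static void emit(const char *line)
    {
        printf("LOG: %s\n", line);
    }

    static void queue(const char *msg)
    {
        char work[512];
        char *start;
        char *newline;

        /* Join any buffered partial line with the new data. */
        snprintf(work, sizeof(work), "%s%s", pending, msg);
        pending[0] = '\0';

        start = work;
        while ((newline = strchr(start, '\n')) != NULL) {
            *newline = '\0';
            emit(start);              /* complete line */
            start = newline + 1;
        }

        /* Save away any incomplete trailing line. */
        snprintf(pending, sizeof(pending), "%s", start);
    }

    int main(void)
    {
        queue("abc\ndef");    /* emits "abc", buffers "def" */
        queue("ghi\n");       /* emits "defghi" */

        return 0;
    }
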
+
+static PyObject *Log_write(LogObject *self, PyObject *args)
+{
+ const char *msg = NULL;
+ int len = -1;
+
+ if (self->expired) {
+ PyErr_SetString(PyExc_RuntimeError, "log object has expired");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, "s#:write", &msg, &len))
+ return NULL;
+
+ Log_queue(self, msg, len);
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *Log_writelines(LogObject *self, PyObject *args)
+{
+ PyObject *sequence = NULL;
+ PyObject *iterator = NULL;
+ PyObject *item = NULL;
+
+ if (self->expired) {
+ PyErr_SetString(PyExc_RuntimeError, "log object has expired");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, "O:writelines", &sequence))
+ return NULL;
+
+ iterator = PyObject_GetIter(sequence);
+
+ if (iterator == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "argument must be sequence of strings");
+
+ return NULL;
+ }
+
+ while ((item = PyIter_Next(iterator))) {
+ PyObject *result = NULL;
+ PyObject *args = NULL;
+
+ args = PyTuple_Pack(1, item);
+
+ result = Log_write(self, args);
+
+ Py_DECREF(args);
+ Py_DECREF(item);
+
+ if (!result) {
+ Py_DECREF(iterator);
+
+ PyErr_SetString(PyExc_TypeError,
+ "argument must be sequence of strings");
+
+ return NULL;
+ }
+ }
+
+ Py_DECREF(iterator);
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+#if PY_MAJOR_VERSION >= 3
+static PyObject *Log_readable(LogObject *self, PyObject *args)
+{
+ Py_INCREF(Py_False);
+ return Py_False;
+}
+
+static PyObject *Log_seekable(LogObject *self, PyObject *args)
+{
+ Py_INCREF(Py_False);
+ return Py_False;
+}
+
+static PyObject *Log_writable(LogObject *self, PyObject *args)
+{
+ Py_INCREF(Py_True);
+ return Py_True;
+}
+#endif
+
+static PyObject *Log_closed(LogObject *self, void *closure)
+{
+ Py_INCREF(Py_False);
+ return Py_False;
+}
+
+#if PY_MAJOR_VERSION < 3
+static PyObject *Log_get_softspace(LogObject *self, void *closure)
+{
+ return PyInt_FromLong(self->softspace);
+}
+
+static int Log_set_softspace(LogObject *self, PyObject *value)
+{
+ int new;
+
+ if (value == NULL) {
+ PyErr_SetString(PyExc_TypeError, "can't delete softspace attribute");
+ return -1;
+ }
+
+ new = PyInt_AsLong(value);
+ if (new == -1 && PyErr_Occurred())
+ return -1;
+
+ self->softspace = new;
+
+ return 0;
+}
+
+#else
+
+static PyObject *Log_get_encoding(LogObject *self, void *closure)
+{
+ return PyUnicode_FromString("utf-8");
+}
+
+static PyObject *Log_get_errors(LogObject *self, void *closure)
+{
+ return PyUnicode_FromString("replace");
+}
+#endif
+
+static PyMethodDef Log_methods[] = {
+ { "flush", (PyCFunction)Log_flush, METH_NOARGS, 0 },
+ { "close", (PyCFunction)Log_close, METH_NOARGS, 0 },
+ { "isatty", (PyCFunction)Log_isatty, METH_NOARGS, 0 },
+ { "write", (PyCFunction)Log_write, METH_VARARGS, 0 },
+ { "writelines", (PyCFunction)Log_writelines, METH_VARARGS, 0 },
+#if PY_MAJOR_VERSION >= 3
+ { "readable", (PyCFunction)Log_readable, METH_NOARGS, 0 },
+ { "seekable", (PyCFunction)Log_seekable, METH_NOARGS, 0 },
+ { "writable", (PyCFunction)Log_writable, METH_NOARGS, 0 },
+#endif
+ { NULL, NULL}
+};
+
+static PyGetSetDef Log_getset[] = {
+ { "closed", (getter)Log_closed, NULL, 0 },
+#if PY_MAJOR_VERSION < 3
+ { "softspace", (getter)Log_get_softspace, (setter)Log_set_softspace, 0 },
+#else
+ { "encoding", (getter)Log_get_encoding, NULL, 0 },
+ { "errors", (getter)Log_get_errors, NULL, 0 },
+#endif
+ { NULL },
+};
+
+PyTypeObject Log_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "mod_wsgi.Log", /*tp_name*/
+ sizeof(LogObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)Log_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT, /*tp_flags*/
+ 0, /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ Log_methods, /*tp_methods*/
+ 0, /*tp_members*/
+ Log_getset, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ 0, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+};
+
+void wsgi_log_python_error(request_rec *r, PyObject *log, const char *filename)
+{
+ PyObject *m = NULL;
+ PyObject *result = NULL;
+
+ PyObject *type = NULL;
+ PyObject *value = NULL;
+ PyObject *traceback = NULL;
+
+ PyObject *xlog = NULL;
+
+ if (!PyErr_Occurred())
+ return;
+
+ if (!log) {
+ PyErr_Fetch(&type, &value, &traceback);
+
+ xlog = newLogObject(r, APLOG_ERR, NULL);
+
+ log = xlog;
+
+ PyErr_Restore(type, value, traceback);
+
+ type = NULL;
+ value = NULL;
+ traceback = NULL;
+ }
+
+ if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ Py_BEGIN_ALLOW_THREADS
+ if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "mod_wsgi (pid=%d): SystemExit exception raised by "
+ "WSGI script '%s' ignored.", getpid(), filename);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): SystemExit exception raised by "
+ "WSGI script '%s' ignored.", getpid(), filename);
+ }
+ Py_END_ALLOW_THREADS
+ }
+ else {
+ Py_BEGIN_ALLOW_THREADS
+ if (r) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "mod_wsgi (pid=%d): Exception occurred processing "
+ "WSGI script '%s'.", getpid(), filename);
+ }
+ else {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server,
+ "mod_wsgi (pid=%d): Exception occurred processing "
+ "WSGI script '%s'.", getpid(), filename);
+ }
+ Py_END_ALLOW_THREADS
+ }
+
+ PyErr_Fetch(&type, &value, &traceback);
+ PyErr_NormalizeException(&type, &value, &traceback);
+
+ if (!value) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+
+ if (!traceback) {
+ traceback = Py_None;
+ Py_INCREF(traceback);
+ }
+
+ m = PyImport_ImportModule("traceback");
+
+ if (m) {
+ PyObject *d = NULL;
+ PyObject *o = NULL;
+ d = PyModule_GetDict(m);
+ o = PyDict_GetItemString(d, "print_exception");
+ if (o) {
+ PyObject *args = NULL;
+ Py_INCREF(o);
+ args = Py_BuildValue("(OOOOO)", type, value, traceback,
+ Py_None, log);
+ result = PyEval_CallObject(o, args);
+ Py_DECREF(args);
+ Py_DECREF(o);
+ }
+ }
+
+ if (!result) {
+ /*
+ * If we can't output the exception and traceback
+ * then use PyErr_Print to dump out details of the
+ * exception. For SystemExit though, if we do that
+ * the process will actually be terminated, so we
+ * can only clear the exception information and
+ * keep going.
+ */
+
+ PyErr_Restore(type, value, traceback);
+
+ if (!PyErr_ExceptionMatches(PyExc_SystemExit)) {
+ PyErr_Print();
+ PyErr_Clear();
+ }
+ else {
+ PyErr_Clear();
+ }
+ }
+ else {
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(traceback);
+ }
+
+ Py_XDECREF(result);
+
+ Py_XDECREF(m);
+
+ Py_XDECREF(xlog);
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_logger.h b/src/server/wsgi_logger.h
new file mode 100644
index 0000000..0a89360
--- /dev/null
+++ b/src/server/wsgi_logger.h
@@ -0,0 +1,40 @@
+#ifndef WSGI_LOGGER_H
+#define WSGI_LOGGER_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+extern PyTypeObject Log_Type;
+
+extern PyObject *newLogObject(request_rec *r, int level, const char *target);
+
+extern void wsgi_log_python_error(request_rec *r, PyObject *log,
+ const char *filename);
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_metrics.c b/src/server/wsgi_metrics.c
new file mode 100644
index 0000000..30569f2
--- /dev/null
+++ b/src/server/wsgi_metrics.c
@@ -0,0 +1,94 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_metrics.h"
+
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Thread utilisation. On the start and end of requests,
+ * and when utilisation is requested, we accrue an
+ * ongoing utilisation time value so we can monitor how
+ * busy we are handling requests.
+ */
+
+int wsgi_active_requests = 0;
+static double wsgi_thread_utilization = 0.0;
+static apr_time_t wsgi_utilization_last = 0;
+int wsgi_dump_blocked_requests = 0;
+
+/* Request tracking and timing. */
+
+apr_thread_mutex_t* wsgi_monitor_lock = NULL;
+
+static double wsgi_utilization_time(int adjustment)
+{
+ apr_time_t now;
+ double utilization = wsgi_thread_utilization;
+
+ apr_thread_mutex_lock(wsgi_monitor_lock);
+
+ now = apr_time_now();
+
+ if (wsgi_utilization_last != 0.0) {
+ utilization = (now - wsgi_utilization_last) / 1000000.0;
+
+ if (utilization < 0)
+ utilization = 0;
+
+ utilization = wsgi_active_requests * utilization;
+ wsgi_thread_utilization += utilization;
+ utilization = wsgi_thread_utilization;
+ }
+
+ wsgi_utilization_last = now;
+ wsgi_active_requests += adjustment;
+
+ apr_thread_mutex_unlock(wsgi_monitor_lock);
+
+ return utilization;
+}
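
The accrual rule above weights elapsed wall clock time by the number of in-flight requests, so, for example, two active requests over half a second accrue 1.0 thread-seconds. A tiny standalone sketch of that arithmetic (values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        double total = 0.0;      /* accrued thread-seconds */
        int active = 2;          /* requests currently in flight */
        double elapsed = 0.5;    /* seconds since the last sample */

        /* Same rule as wsgi_utilization_time(): elapsed wall clock
         * time is weighted by the number of active requests. */
        total += active * elapsed;

        printf("accrued %.1f thread-seconds\n", total);   /* prints 1.0 */

        return 0;
    }
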
+
+double wsgi_start_request(void)
+{
+ return wsgi_utilization_time(1);
+}
+
+double wsgi_end_request(void)
+{
+ return wsgi_utilization_time(-1);
+}
+
+static PyObject *wsgi_get_thread_utilization(PyObject *self, PyObject *args)
+{
+ return PyFloat_FromDouble(wsgi_utilization_time(0));
+}
+
+PyMethodDef wsgi_get_utilization_method[] = {
+ { "thread_utilization", (PyCFunction)wsgi_get_thread_utilization,
+ METH_NOARGS, 0 },
+ { NULL },
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_metrics.h b/src/server/wsgi_metrics.h
new file mode 100644
index 0000000..6ca27bd
--- /dev/null
+++ b/src/server/wsgi_metrics.h
@@ -0,0 +1,43 @@
+#ifndef WSGI_METRICS_H
+#define WSGI_METRICS_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+extern int wsgi_active_requests;
+extern int wsgi_dump_blocked_requests;
+
+extern apr_thread_mutex_t* wsgi_monitor_lock;
+
+extern PyMethodDef wsgi_get_utilization_method[];
+
+extern double wsgi_start_request(void);
+extern double wsgi_end_request(void);
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_python.h b/src/server/wsgi_python.h
new file mode 100644
index 0000000..54c3ded
--- /dev/null
+++ b/src/server/wsgi_python.h
@@ -0,0 +1,113 @@
+#ifndef WSGI_PYTHON_H
+#define WSGI_PYTHON_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include <Python.h>
+
+#if !defined(PY_VERSION_HEX)
+#error Sorry, Python developer package does not appear to be installed.
+#endif
+
+#if PY_VERSION_HEX <= 0x02030000
+#error Sorry, mod_wsgi requires at least Python 2.3.0 for Python 2.X.
+#endif
+
+#if PY_VERSION_HEX >= 0x03000000 && PY_VERSION_HEX < 0x03010000
+#error Sorry, mod_wsgi requires at least Python 3.1.0 for Python 3.X.
+#endif
+
+#if !defined(WITH_THREAD)
+#error Sorry, mod_wsgi requires that Python be compiled with thread support.
+#endif
+
+#include "structmember.h"
+#include "compile.h"
+#include "node.h"
+#include "osdefs.h"
+#include "frameobject.h"
+
+#ifndef PyVarObject_HEAD_INIT
+#define PyVarObject_HEAD_INIT(type, size) \
+ PyObject_HEAD_INIT(type) size,
+#endif
+
+#ifndef Py_REFCNT
+#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+#endif
+
+#ifndef Py_TYPE
+#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+#endif
+
+#ifndef Py_SIZE
+#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+#define PyStringObject PyBytesObject
+#define PyString_Check PyBytes_Check
+#define PyString_Size PyBytes_Size
+#define PyString_AsString PyBytes_AsString
+#define PyString_FromString PyBytes_FromString
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define _PyString_Resize _PyBytes_Resize
+#endif
+
+#if PY_MAJOR_VERSION < 3
+#ifndef PyBytesObject
+#define PyBytesObject PyStringObject
+#define PyBytes_Type PyString_Type
+
+#define PyBytes_Check PyString_Check
+#define PyBytes_CheckExact PyString_CheckExact
+#define PyBytes_CHECK_INTERNED PyString_CHECK_INTERNED
+#define PyBytes_AS_STRING PyString_AS_STRING
+#define PyBytes_GET_SIZE PyString_GET_SIZE
+#define Py_TPFLAGS_BYTES_SUBCLASS Py_TPFLAGS_STRING_SUBCLASS
+
+#define PyBytes_FromStringAndSize PyString_FromStringAndSize
+#define PyBytes_FromString PyString_FromString
+#define PyBytes_FromFormatV PyString_FromFormatV
+#define PyBytes_FromFormat PyString_FromFormat
+#define PyBytes_Size PyString_Size
+#define PyBytes_AsString PyString_AsString
+#define PyBytes_Repr PyString_Repr
+#define PyBytes_Concat PyString_Concat
+#define PyBytes_ConcatAndDel PyString_ConcatAndDel
+#define _PyBytes_Resize _PyString_Resize
+#define _PyBytes_Eq _PyString_Eq
+#define PyBytes_Format PyString_Format
+#define _PyBytes_FormatLong _PyString_FormatLong
+#define PyBytes_DecodeEscape PyString_DecodeEscape
+#define _PyBytes_Join _PyString_Join
+#define PyBytes_AsStringAndSize PyString_AsStringAndSize
+#define _PyBytes_InsertThousandsGrouping _PyString_InsertThousandsGrouping
+#endif
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_restrict.c b/src/server/wsgi_restrict.c
new file mode 100644
index 0000000..24fc5d9
--- /dev/null
+++ b/src/server/wsgi_restrict.c
@@ -0,0 +1,98 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_restrict.h"
+
+/* ------------------------------------------------------------------------- */
+
+PyTypeObject Restricted_Type;
+
+RestrictedObject *newRestrictedObject(const char *s)
+{
+ RestrictedObject *self;
+
+ self = PyObject_New(RestrictedObject, &Restricted_Type);
+ if (self == NULL)
+ return NULL;
+
+ self->s = s;
+
+ return self;
+}
+
+static void Restricted_dealloc(RestrictedObject *self)
+{
+ PyObject_Del(self);
+}
+
+static PyObject *Restricted_getattr(RestrictedObject *self, char *name)
+{
+ PyErr_Format(PyExc_IOError, "%s access restricted by mod_wsgi", self->s);
+
+ return NULL;
+}
+
+PyTypeObject Restricted_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "mod_wsgi.Restricted", /*tp_name*/
+ sizeof(RestrictedObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)Restricted_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ (getattrfunc)Restricted_getattr, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT, /*tp_flags*/
+ 0, /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ 0, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ 0, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_restrict.h b/src/server/wsgi_restrict.h
new file mode 100644
index 0000000..e6e1267
--- /dev/null
+++ b/src/server/wsgi_restrict.h
@@ -0,0 +1,43 @@
+#ifndef WSGI_RESTRICT_H
+#define WSGI_RESTRICT_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+
+/* ------------------------------------------------------------------------- */
+
+/* Restricted object to stop access to STDIN/STDOUT. */
+
+typedef struct {
+ PyObject_HEAD
+ const char *s;
+} RestrictedObject;
+
+extern PyTypeObject Restricted_Type;
+
+extern RestrictedObject *newRestrictedObject(const char *s);
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_server.c b/src/server/wsgi_server.c
new file mode 100644
index 0000000..de6c189
--- /dev/null
+++ b/src/server/wsgi_server.c
@@ -0,0 +1,132 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_server.h"
+
+#include "wsgi_daemon.h"
+
+/* ------------------------------------------------------------------------- */
+
+/* Base server object. */
+
+server_rec *wsgi_server = NULL;
+
+apr_pool_t *wsgi_daemon_pool = NULL;
+const char *wsgi_daemon_group = "";
+
+/* Process information. */
+
+pid_t wsgi_parent_pid = 0;
+
+/* New Relic monitoring agent. */
+
+const char *wsgi_newrelic_config_file = NULL;
+const char *wsgi_newrelic_environment = NULL;
+
+/* Python interpreter state. */
+
+PyThreadState *wsgi_main_tstate = NULL;
+
+/* Configuration objects. */
+
+WSGIServerConfig *wsgi_server_config = NULL;
+
+WSGIScriptFile *newWSGIScriptFile(apr_pool_t *p)
+{
+ WSGIScriptFile *object = NULL;
+
+ object = (WSGIScriptFile *)apr_pcalloc(p, sizeof(WSGIScriptFile));
+
+ object->handler_script = NULL;
+ object->application_group = NULL;
+ object->process_group = NULL;
+
+ return object;
+}
+
+WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p)
+{
+ WSGIServerConfig *object = NULL;
+
+ object = (WSGIServerConfig *)apr_pcalloc(p, sizeof(WSGIServerConfig));
+
+ object->pool = p;
+
+ object->alias_list = NULL;
+
+ object->socket_prefix = NULL;
+
+#if defined(MOD_WSGI_WITH_DAEMONS)
+ object->socket_prefix = DEFAULT_REL_RUNTIMEDIR "/wsgi";
+ object->socket_prefix = ap_server_root_relative(p, object->socket_prefix);
+#endif
+
+ object->verbose_debugging = 0;
+
+ object->python_warnings = NULL;
+
+ object->py3k_warning_flag = -1;
+ object->python_optimize = -1;
+ object->dont_write_bytecode = -1;
+
+ object->lang = NULL;
+ object->locale = NULL;
+
+ object->python_home = NULL;
+ object->python_path = NULL;
+ object->python_eggs = NULL;
+
+ object->python_hash_seed = NULL;
+
+ object->restrict_embedded = -1;
+ object->restrict_stdin = -1;
+ object->restrict_stdout = -1;
+ object->restrict_signal = -1;
+
+#if defined(WIN32) || defined(DARWIN)
+ object->case_sensitivity = 0;
+#else
+ object->case_sensitivity = 1;
+#endif
+
+ object->restrict_process = NULL;
+
+ object->process_group = NULL;
+ object->application_group = NULL;
+ object->callable_object = NULL;
+
+ object->dispatch_script = NULL;
+
+ object->pass_apache_request = -1;
+ object->pass_authorization = -1;
+ object->script_reloading = -1;
+ object->error_override = -1;
+ object->chunked_request = -1;
+
+ object->enable_sendfile = -1;
+
+ object->newrelic_config_file = NULL;
+ object->newrelic_environment = NULL;
+
+ return object;
+}
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_server.h b/src/server/wsgi_server.h
new file mode 100644
index 0000000..112ff5b
--- /dev/null
+++ b/src/server/wsgi_server.h
@@ -0,0 +1,125 @@
+#ifndef WSGI_SERVER
+#define WSGI_SERVER
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+extern server_rec *wsgi_server;
+extern pid_t wsgi_parent_pid;
+extern const char *wsgi_daemon_group;
+
+/* New Relic monitoring agent. */
+
+extern const char *wsgi_newrelic_config_file;
+extern const char *wsgi_newrelic_environment;
+
+/* Python interpreter state. */
+
+extern PyThreadState *wsgi_main_tstate;
+
+typedef struct {
+ const char *location;
+ const char *application;
+ ap_regex_t *regexp;
+ const char *process_group;
+ const char *application_group;
+ const char *callable_object;
+ int pass_authorization;
+} WSGIAliasEntry;
+
+typedef struct {
+ const char *handler_script;
+ const char *process_group;
+ const char *application_group;
+ const char *callable_object;
+ const char *pass_authorization;
+} WSGIScriptFile;
+
+typedef struct {
+ apr_pool_t *pool;
+
+ apr_array_header_t *alias_list;
+
+ const char *socket_prefix;
+ apr_lockmech_e lock_mechanism;
+
+ int verbose_debugging;
+
+ apr_array_header_t *python_warnings;
+
+ int python_optimize;
+ int py3k_warning_flag;
+ int dont_write_bytecode;
+
+ const char *lang;
+ const char *locale;
+
+ const char *python_home;
+ const char *python_path;
+ const char *python_eggs;
+
+ const char *python_hash_seed;
+
+ int restrict_embedded;
+ int restrict_stdin;
+ int restrict_stdout;
+ int restrict_signal;
+
+ int case_sensitivity;
+
+ apr_table_t *restrict_process;
+
+ const char *process_group;
+ const char *application_group;
+ const char *callable_object;
+
+ WSGIScriptFile *dispatch_script;
+
+ int pass_apache_request;
+ int pass_authorization;
+ int script_reloading;
+ int error_override;
+ int chunked_request;
+
+ int enable_sendfile;
+
+ apr_hash_t *handler_scripts;
+
+ const char *newrelic_config_file;
+ const char *newrelic_environment;
+} WSGIServerConfig;
+
+extern WSGIServerConfig *wsgi_server_config;
+
+extern WSGIScriptFile *newWSGIScriptFile(apr_pool_t *p);
+extern WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p);
+
+extern apr_pool_t *wsgi_daemon_pool;
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_stream.c b/src/server/wsgi_stream.c
new file mode 100644
index 0000000..2b40bc4
--- /dev/null
+++ b/src/server/wsgi_stream.c
@@ -0,0 +1,255 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_stream.h"
+
+/* ------------------------------------------------------------------------- */
+
+PyTypeObject Stream_Type;
+
+static PyObject *Stream_new(PyTypeObject *type, PyObject *args,
+ PyObject *kwds)
+{
+ StreamObject *self;
+
+ self = (StreamObject *)type->tp_alloc(type, 0);
+ if (self == NULL)
+ return NULL;
+
+ self->filelike = Py_None;
+ Py_INCREF(self->filelike);
+
+ self->blksize = 0;
+
+ return (PyObject *)self;
+}
+
+static int Stream_init(StreamObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *filelike = NULL;
+ apr_size_t blksize = HUGE_STRING_LEN;
+
+ static char *kwlist[] = { "filelike", "blksize", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|l:FileWrapper", kwlist,
+ &filelike, &blksize)) {
+ return -1;
+ }
+
+ if (filelike) {
+ PyObject *tmp = NULL;
+
+ tmp = self->filelike;
+ Py_INCREF(filelike);
+ self->filelike = filelike;
+ Py_XDECREF(tmp);
+ }
+
+ self->blksize = blksize;
+
+ return 0;
+}
+
+static void Stream_dealloc(StreamObject *self)
+{
+ Py_XDECREF(self->filelike);
+
+ Py_TYPE(self)->tp_free(self);
+}
+
+static PyObject *Stream_iter(StreamObject *self)
+{
+ Py_INCREF(self);
+ return (PyObject *)self;
+}
+
+static PyObject *Stream_iternext(StreamObject *self)
+{
+ PyObject *attribute = NULL;
+ PyObject *method = NULL;
+ PyObject *args = NULL;
+ PyObject *result = NULL;
+
+ attribute = PyObject_GetAttrString((PyObject *)self, "filelike");
+
+ if (!attribute) {
+ PyErr_SetString(PyExc_KeyError,
+ "file wrapper no filelike attribute");
+ return 0;
+ }
+
+ method = PyObject_GetAttrString(attribute, "read");
+
+ if (!method) {
+ PyErr_SetString(PyExc_KeyError,
+ "file like object has no read() method");
+ Py_DECREF(attribute);
+ return 0;
+ }
+
+ Py_DECREF(attribute);
+
+ attribute = PyObject_GetAttrString((PyObject *)self, "blksize");
+
+ if (!attribute) {
+ PyErr_SetString(PyExc_KeyError,
+ "file wrapper has no blksize attribute");
+ Py_DECREF(method);
+ return 0;
+ }
+
+ if (!PyLong_Check(attribute)) {
+ PyErr_SetString(PyExc_KeyError,
+ "file wrapper blksize attribute not integer");
+ Py_DECREF(method);
+ Py_DECREF(attribute);
+ return 0;
+ }
+
+ args = Py_BuildValue("(O)", attribute);
+ result = PyEval_CallObject(method, args);
+
+ Py_DECREF(args);
+ Py_DECREF(method);
+ Py_DECREF(attribute);
+
+ if (!result)
+ return 0;
+
+ if (PyString_Check(result)) {
+ if (PyString_Size(result) == 0) {
+ PyErr_SetObject(PyExc_StopIteration, Py_None);
+ Py_DECREF(result);
+ return 0;
+ }
+
+ return result;
+ }
+
+ Py_DECREF(result);
+
+ PyErr_SetString(PyExc_TypeError,
+ "file like object yielded non string type");
+
+ return 0;
+}
+
+static PyObject *Stream_close(StreamObject *self, PyObject *args)
+{
+ PyObject *method = NULL;
+ PyObject *result = NULL;
+
+ if (!self->filelike || self->filelike == Py_None) {
+ Py_INCREF(Py_None);
+ return Py_None;
+ }
+
+ method = PyObject_GetAttrString(self->filelike, "close");
+
+ if (method) {
+ result = PyEval_CallObject(method, (PyObject *)NULL);
+ if (!result)
+ PyErr_Clear();
+ Py_DECREF(method);
+ }
+
+ Py_XDECREF(result);
+
+ Py_DECREF(self->filelike);
+ self->filelike = NULL;
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *Stream_get_filelike(StreamObject *self, void *closure)
+{
+ Py_INCREF(self->filelike);
+ return self->filelike;
+}
+
+
+static PyObject *Stream_get_blksize(StreamObject *self, void *closure)
+{
+ return PyLong_FromLong(self->blksize);
+}
+
+static PyMethodDef Stream_methods[] = {
+ { "close", (PyCFunction)Stream_close, METH_NOARGS, 0 },
+ { NULL, NULL }
+};
+
+static PyGetSetDef Stream_getset[] = {
+ { "filelike", (getter)Stream_get_filelike, NULL, 0 },
+ { "blksize", (getter)Stream_get_blksize, NULL, 0 },
+ { NULL },
+};
+
+PyTypeObject Stream_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "mod_wsgi.FileWrapper", /*tp_name*/
+ sizeof(StreamObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)Stream_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+#if defined(Py_TPFLAGS_HAVE_ITER)
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER, /*tp_flags*/
+#else
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+#endif
+ 0, /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ (getiterfunc)Stream_iter, /*tp_iter*/
+ (iternextfunc)Stream_iternext, /*tp_iternext*/
+ Stream_methods, /*tp_methods*/
+ 0, /*tp_members*/
+ Stream_getset, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ (initproc)Stream_init, /*tp_init*/
+ 0, /*tp_alloc*/
+ Stream_new, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
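
The Stream type above backs the FileWrapper object: iterating over it calls the wrapped object's read(blksize) method until an empty byte string is returned, and close() is forwarded to the wrapped object if it provides one. A minimal application-side sketch, assuming the server exposes this type through the optional 'wsgi.file_wrapper' key described in PEP 3333; the file path and block size are purely illustrative:

    def application(environ, start_response):
        start_response('200 OK',
                       [('Content-Type', 'application/octet-stream')])

        # Hypothetical file; any object with a read(size) method will do.
        filelike = open('/var/www/data/download.bin', 'rb')

        wrapper = environ.get('wsgi.file_wrapper')
        if wrapper is not None:
            # Equivalent to FileWrapper(filelike, blksize); iteration ends
            # once read() returns an empty byte string.
            return wrapper(filelike, 8192)

        # Fallback for servers that provide no file wrapper.
        return iter(lambda: filelike.read(8192), b'')
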
diff --git a/src/server/wsgi_stream.h b/src/server/wsgi_stream.h
new file mode 100644
index 0000000..9065b8d
--- /dev/null
+++ b/src/server/wsgi_stream.h
@@ -0,0 +1,41 @@
+#ifndef WSGI_STREAM_H
+#define WSGI_STREAM_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+#include "wsgi_apache.h"
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *filelike;
+ apr_size_t blksize;
+} StreamObject;
+
+extern PyTypeObject Stream_Type;
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_validate.c b/src/server/wsgi_validate.c
new file mode 100644
index 0000000..7c3b15b
--- /dev/null
+++ b/src/server/wsgi_validate.c
@@ -0,0 +1,172 @@
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_validate.h"
+
+#include "wsgi_convert.h"
+
+#include <ctype.h>
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * A WSGI response status line consists of a status code and a reason
+ * phrase separated by one or more space characters. The status code is
+ * a 3 digit integer. The reason phrase is any text excluding control
+ * characters and specifically excluding any carriage return or line
+ * feed characters. Technically the reason phrase can be empty so long
+ * as there still is at least a single space after the status code.
+ */
+
+int wsgi_validate_status_line(PyObject *value)
+{
+ const char *s;
+
+ if (!PyBytes_Check(value)) {
+ PyErr_Format(PyExc_TypeError, "expected byte string object for "
+ "status line, value of type %.200s found",
+ value->ob_type->tp_name);
+ return 0;
+ }
+
+ s = PyBytes_AsString(value);
+
+ if (!isdigit(*s++) || !isdigit(*s++) || !isdigit(*s++)) {
+ PyErr_SetString(PyExc_ValueError,
+ "status code is not a 3 digit integer");
+ return 0;
+ }
+
+ if (isdigit(*s)) {
+ PyErr_SetString(PyExc_ValueError,
+ "status code is not a 3 digit integer");
+ return 0;
+ }
+
+ if (*s != ' ') {
+ PyErr_SetString(PyExc_ValueError, "no space following status code");
+ return 0;
+ }
+
+ if (!*s) {
+ PyErr_SetString(PyExc_ValueError, "no reason phrase supplied");
+ return 0;
+ }
+
+ while (*s) {
+ if (iscntrl(*s)) {
+ PyErr_SetString(PyExc_ValueError,
+ "control character present in reason phrase");
+ return 0;
+ }
+ s++;
+ }
+
+ return 1;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * A WSGI header name is a token consisting of one or more characters
+ * except control characters, the separator characters "(", ")", "<",
+ * ">", "@", ",", ";", ":", "\", <">, "/", "[", "]", "?", "=", "{", "}"
+ * and the space character. Only control characters and the space
+ * character are checked for here, since carriage return, line feed, and
+ * leading or trailing white space are the real problems.
+ */
+
+int wsgi_validate_header_name(PyObject *value)
+{
+ const char *s;
+
+ if (!PyBytes_Check(value)) {
+ PyErr_Format(PyExc_TypeError, "expected byte string object for "
+ "header name, value of type %.200s found",
+ value->ob_type->tp_name);
+ return 0;
+ }
+
+ s = PyBytes_AsString(value);
+
+ if (!*s) {
+ PyErr_SetString(PyExc_ValueError, "header name is empty");
+ return 0;
+ }
+
+ while (*s) {
+ if (iscntrl(*s)) {
+ PyErr_SetString(PyExc_ValueError,
+ "control character present in header name");
+ return 0;
+ }
+
+ if (*s == ' ') {
+ PyErr_SetString(PyExc_ValueError,
+ "space character present in header name");
+ return 0;
+ }
+ s++;
+ }
+
+ return 1;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * A WSGI header value consists of any number of characters except
+ * control characters. Only bother checking for carriage return and line
+ * feed characters as it is not possible to trust that applications will
+ * not use control characters. In practice the intent is that WSGI
+ * applications shouldn't use embedded carriage return and line feed
+ * characters to prevent attempts at line continuation which may cause
+ * problems with some hosting mechanisms. In other words, the header
+ * value should be all on one line.
+ */
+
+int wsgi_validate_header_value(PyObject *value)
+{
+ const char *s;
+
+ if (!PyBytes_Check(value)) {
+ PyErr_Format(PyExc_TypeError, "expected byte string object for "
+ "header value, value of type %.200s found",
+ value->ob_type->tp_name);
+ return 0;
+ }
+
+ s = PyBytes_AsString(value);
+
+ while (*s) {
+ if (*s == '\r' || *s == '\n') {
+ PyErr_SetString(PyExc_ValueError, "carriage return/line "
+ "feed character present in header value");
+ return 0;
+ }
+ s++;
+ }
+
+ return 1;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* vi: set sw=4 expandtab : */
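
As a rough application-side illustration of what the three validators above accept and reject (the header names and values below are made up; the validators operate on byte strings, so the native strings supplied by the application are presumably converted first, via wsgi_convert.c, before these checks run):

    def application(environ, start_response):
        # Accepted: a 3 digit status code, a single following space, then
        # a reason phrase containing no control characters.
        status = '200 OK'

        # Rejected by wsgi_validate_status_line():
        #   '200'            - no space following status code
        #   '20x OK'         - status code is not a 3 digit integer
        #   '2000 OK'        - status code is not a 3 digit integer
        #   '200 OK\r\nX: y' - control character present in reason phrase

        # Header names must be non empty and contain no spaces or control
        # characters; header values must not embed carriage return or
        # line feed characters.
        headers = [('Content-Type', 'text/plain'),
                   ('X-Custom-Header', 'kept on a single line')]

        start_response(status, headers)
        return [b'ok']
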
diff --git a/src/server/wsgi_validate.h b/src/server/wsgi_validate.h
new file mode 100644
index 0000000..f8a8d35
--- /dev/null
+++ b/src/server/wsgi_validate.h
@@ -0,0 +1,36 @@
+#ifndef WSGI_VALIDATE_H
+#define WSGI_VALIDATE_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#include "wsgi_python.h"
+
+/* ------------------------------------------------------------------------- */
+
+extern int wsgi_validate_status_line(PyObject *value);
+extern int wsgi_validate_header_name(PyObject *value);
+extern int wsgi_validate_header_value(PyObject *value);
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/src/server/wsgi_version.h b/src/server/wsgi_version.h
new file mode 100644
index 0000000..2715e50
--- /dev/null
+++ b/src/server/wsgi_version.h
@@ -0,0 +1,35 @@
+#ifndef WSGI_VERSION_H
+#define WSGI_VERSION_H
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Copyright 2007-2013 GRAHAM DUMPLETON
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* ------------------------------------------------------------------------- */
+
+/* Module version information. */
+
+#define MOD_WSGI_MAJORVERSION_NUMBER 4
+#define MOD_WSGI_MINORVERSION_NUMBER 0
+#define MOD_WSGI_MICROVERSION_NUMBER 0
+#define MOD_WSGI_VERSION_STRING "4.0.0"
+
+/* ------------------------------------------------------------------------- */
+
+#endif
+
+/* vi: set sw=4 expandtab : */
diff --git a/tests/environ.wsgi b/tests/environ.wsgi
new file mode 100644
index 0000000..9046b93
--- /dev/null
+++ b/tests/environ.wsgi
@@ -0,0 +1,44 @@
+from __future__ import print_function
+
+import os
+import sys
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+def application(environ, start_response):
+ headers = []
+ headers.append(('Content-Type', 'text/plain'))
+ write = start_response('200 OK', headers)
+
+ input = environ['wsgi.input']
+ output = StringIO()
+
+ print('PID: %s' % os.getpid(), file=output)
+ print('UID: %s' % os.getuid(), file=output)
+ print('GID: %s' % os.getgid(), file=output)
+ print(file=output)
+
+ print('PATH: %s' % sys.path, file=output)
+ print(file=output)
+
+ keys = sorted(environ.keys())
+ for key in keys:
+ print('%s: %s' % (key, repr(environ[key])), file=output)
+ print(file=output)
+
+ keys = sorted(os.environ.keys())
+ for key in keys:
+ print('%s: %s' % (key, repr(os.environ[key])), file=output)
+ print(file=output)
+
+ result = output.getvalue()
+
+ if not isinstance(result, bytes):
+ result = result.encode('UTF-8')
+
+ yield result
+
+ yield input.read(int(environ.get('CONTENT_LENGTH', '0')))
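
The script above echoes process details, sys.path, the WSGI environ, the process environment and finally the request body. A quick, hypothetical way to exercise it outside of Apache is the wsgiref server from the standard library; the path and port below are illustrative, and the .wsgi file is exec'd because it has no .py extension:

    from wsgiref.simple_server import make_server

    # Load tests/environ.wsgi as a plain module namespace.
    namespace = {}
    with open('tests/environ.wsgi') as f:
        exec(compile(f.read(), 'environ.wsgi', 'exec'), namespace)

    # Serve locally; try e.g. curl -d 'hello' http://localhost:8000/
    make_server('localhost', 8000, namespace['application']).serve_forever()
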
diff --git a/tests/hello.wsgi b/tests/hello.wsgi
new file mode 100644
index 0000000..cb48488
--- /dev/null
+++ b/tests/hello.wsgi
@@ -0,0 +1,9 @@
+def application(environ, start_response):
+ status = '200 OK'
+ output = b'Hello World!'
+
+ response_headers = [('Content-type', 'text/plain'),
+ ('Content-Length', str(len(output)))]
+ start_response(status, response_headers)
+
+ return [output]
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..ea952ef
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,2 @@
+[tox]
+envlist = py26,py27,py33
diff --git a/win32-ap22py26.mk b/win32/ap22py26.mk
index 3bf79df..99844f8 100644
--- a/win32-ap22py26.mk
+++ b/win32/ap22py26.mk
@@ -32,8 +32,10 @@ LDLIBS = \
libapr-1.lib \
libaprutil-1.lib
-mod_wsgi.so : mod_wsgi.c
- cl $(CPPFLAGS) $(CFLAGS) $? /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
+SRCFILES = mod_wsgi.c wsgi_apache.c wsgi_convert.c wsgi_validate.c
+
+mod_wsgi.so : $(SRCFILES)
+ cl $(CPPFLAGS) $(CFLAGS) $(SRCFILES) /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
mt -manifest $@.manifest -outputresource:$@;2
clean :
diff --git a/win32-ap22py27.mk b/win32/ap22py27.mk
index 53bbbb8..c85f4c6 100644
--- a/win32-ap22py27.mk
+++ b/win32/ap22py27.mk
@@ -32,8 +32,10 @@ LDLIBS = \
libapr-1.lib \
libaprutil-1.lib
-mod_wsgi.so : mod_wsgi.c
- cl $(CPPFLAGS) $(CFLAGS) $? /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
+SRCFILES = mod_wsgi.c wsgi_apache.c wsgi_convert.c wsgi_validate.c
+
+mod_wsgi.so : $(SRCFILES)
+ cl $(CPPFLAGS) $(CFLAGS) $(SRCFILES) /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
mt -manifest $@.manifest -outputresource:$@;2
clean :
diff --git a/win32-ap22py31.mk b/win32/ap22py31.mk
index 34e13cc..23e2ac6 100644
--- a/win32-ap22py31.mk
+++ b/win32/ap22py31.mk
@@ -32,8 +32,10 @@ LDLIBS = \
libapr-1.lib \
libaprutil-1.lib
-mod_wsgi.so : mod_wsgi.c
- cl $(CPPFLAGS) $(CFLAGS) $? /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
+SRCFILES = mod_wsgi.c wsgi_apache.c wsgi_convert.c wsgi_validate.c
+
+mod_wsgi.so : $(SRCFILES)
+ cl $(CPPFLAGS) $(CFLAGS) $(SRCFILES) /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
mt -manifest $@.manifest -outputresource:$@;2
clean :