author    Lorry <lorry@roadtrain.codethink.co.uk>  2012-08-28 15:30:14 +0100
committer Lorry <lorry@roadtrain.codethink.co.uk>  2012-08-28 15:30:14 +0100
commit    74dd0183e4e56b07cedfa87eae7a8fb3166f01e8 (patch)
tree      4e4ba1caa7f82fd5b1f38c503fac24e5e25b5fc5
download  libserf-tarball-74dd0183e4e56b07cedfa87eae7a8fb3166f01e8.tar.gz
Tarball conversion
-rw-r--r--  CHANGES  153
-rw-r--r--  LICENSE  201
-rw-r--r--  Makefile.in  168
-rw-r--r--  NOTICE  2
-rw-r--r--  README  38
-rw-r--r--  auth/auth.c  374
-rw-r--r--  auth/auth.h  105
-rw-r--r--  auth/auth_basic.c  155
-rw-r--r--  auth/auth_digest.c  475
-rw-r--r--  auth/auth_kerb.c  350
-rw-r--r--  auth/auth_kerb.h  103
-rw-r--r--  auth/auth_kerb_gss.c  146
-rw-r--r--  auth/auth_kerb_sspi.c  210
-rw-r--r--  buckets/aggregate_buckets.c  400
-rw-r--r--  buckets/allocator.c  434
-rw-r--r--  buckets/barrier_buckets.c  97
-rw-r--r--  buckets/buckets.c  538
-rw-r--r--  buckets/bwtp_buckets.c  596
-rw-r--r--  buckets/chunk_buckets.c  235
-rw-r--r--  buckets/dechunk_buckets.c  185
-rw-r--r--  buckets/deflate_buckets.c  384
-rw-r--r--  buckets/file_buckets.c  117
-rw-r--r--  buckets/headers_buckets.c  429
-rw-r--r--  buckets/iovec_buckets.c  169
-rw-r--r--  buckets/limit_buckets.c  134
-rw-r--r--  buckets/mmap_buckets.c  118
-rw-r--r--  buckets/request_buckets.c  228
-rw-r--r--  buckets/response_buckets.c  429
-rw-r--r--  buckets/simple_buckets.c  142
-rw-r--r--  buckets/socket_buckets.c  114
-rw-r--r--  buckets/ssl_buckets.c  1629
-rw-r--r--  build/apr_common.m4  985
-rwxr-xr-x  build/config.guess  1517
-rwxr-xr-x  build/config.sub  1760
-rw-r--r--  build/find_apr.m4  202
-rw-r--r--  build/find_apu.m4  211
-rwxr-xr-x  build/gen_def.py  68
-rwxr-xr-x  build/get-version.sh  37
-rwxr-xr-x  build/install.sh  112
-rw-r--r--  build/serf.def  136
-rwxr-xr-x  buildconf  119
-rw-r--r--  config.layout  26
-rwxr-xr-x  configure  6257
-rw-r--r--  configure.in  219
-rw-r--r--  context.c  371
-rw-r--r--  design-guide.txt  152
-rw-r--r--  incoming.c  176
-rw-r--r--  outgoing.c  1431
-rw-r--r--  serf.h  1086
-rw-r--r--  serf.mak  213
-rw-r--r--  serf.pc.in  13
-rw-r--r--  serf_bucket_types.h  656
-rw-r--r--  serf_bucket_util.h  286
-rw-r--r--  serf_private.h  353
-rwxr-xr-x  serfmake  485
-rw-r--r--  ssltunnel.c  168
-rw-r--r--  test/CuTest-README.txt  225
-rw-r--r--  test/CuTest.c  367
-rw-r--r--  test/CuTest.h  146
-rw-r--r--  test/serf_bwtp.c  635
-rw-r--r--  test/serf_get.c  503
-rw-r--r--  test/serf_request.c  79
-rw-r--r--  test/serf_response.c  161
-rw-r--r--  test/serf_server.c  147
-rw-r--r--  test/serf_spider.c  826
-rw-r--r--  test/serftestca.pem  66
-rw-r--r--  test/server/test_server.c  359
-rw-r--r--  test/server/test_server.h  70
-rw-r--r--  test/test_all.c  101
-rw-r--r--  test/test_buckets.c  425
-rw-r--r--  test/test_context.c  1011
-rw-r--r--  test/test_serf.h  126
-rw-r--r--  test/test_ssl.c  112
-rw-r--r--  test/test_util.c  214
-rw-r--r--  test/testcases/chunked-empty.response  11
-rw-r--r--  test/testcases/chunked-trailers.response  14
-rw-r--r--  test/testcases/chunked.response  13
-rw-r--r--  test/testcases/deflate.response  bin 0 -> 639 bytes
-rw-r--r--  test/testcases/simple.request  1
-rw-r--r--  test/testcases/simple.response  32
80 files changed, 31541 insertions, 0 deletions
diff --git a/CHANGES b/CHANGES
new file mode 100644
index 0000000..5124082
--- /dev/null
+++ b/CHANGES
@@ -0,0 +1,153 @@
+Serf 1.1.0 [2012-06-07, from /tags/1.1.0]
+ New: serf_bucket_request_set_CL() for C-L based, non-chunked requests
+ New: serf_ssl_server_cert_chain_callback_set() for full-chain validation
+
+
+Serf 1.0.3 [2012-03-20, from /tags/1.0.3, r1586]
+ Map more OpenSSL errors into SERF_SSL_CERT_UNKNOWNCA (r1573)
+
+
+Serf 1.0.2
+ Not released.
+
+
+Serf 1.0.1 [2012-02-15, from /tags/1.0.1, r1569]
+ FreeBSD fixes in the test suite (r1560, r1565)
+ Minor build fixes
+
+
+Serf 1.0.0 [2011-07-15, from /tags/1.0.0, r1540]
+ Fixed issue 38: enable builds using non-GNU make
+ Fixed issue 49: support SSL tunnels for HTTPS via a proxy
+ Fixed issue 56: allow Subject Alternative Name, and enable SNI
+ Fixed issue 61: include order dependencies
+ Fixed issue 66: improved error reporting when creating install dirs
+ Fixed issue 71: handle ECONNREFUSED on Windows
+ Fixed issue 79: destroy the APR allocator, if we create one
+ Fixed issue 81: build failed on APR 0.9.x
+ Major performance improvements and bug fixes for SSL buckets/handling (r1462)
+ Add a new "iovec" bucket type (r1434)
+ Minimize network packet writes based on ra_serf analysis (r1467, r1471)
+ Fix out of order issue with multiple priority requests (r1469)
+ Work around broken WSAPoll() impl on Windows introduced in APR 1.4.0 (r1506)
+ Fix 100% CPU usage with many pipelined requests (r1456)
+ Corrected contents of build/serf.def; it now includes bucket types (r1512)
+ Removed "snapshot" feature from buckets (r1503)
+ Various improvements to the test system
+ Various memory leak fixes
+
+
+Serf 0.7.2 [2011-03-12, from /tags/0.7.2, r1452]
+ Actually disable Nagle when creating a connection (r1441)
+ Return error when app asks for HTTPS over proxy connection (r1433)
+
+
+Serf 0.7.1 [2011-01-25, from /tags/0.7.1, r1432]
+ Fix memory leak when using SSL (r1408, r1416)
+ Fix build for blank apr-util directory (r1421)
+
+
+Serf 0.7.0 [2010-08-25, from /tags/0.7.0, r1407]
+ Fix double free abort when destroying request buckets
+ Fix test server in unit test framework to avoid random test failures
+ Allow older Serf programs which don't use the new authn framework to still
+ handle authn without forcing them to switch to the new framework. (r1401)
+ Remove the SERF_DECLARE macros, preferring a .DEF file for Windows
+ Barrier buckets now pass read_iovec to their wrapped bucket
+ Fix HTTP header parsing to allow for empty header values
+
+
+Serf 0.6.1 [2010-05-14, from /tags/0.6.1, r1370]
+ Generally: this release fixes problems with the 0.4.0 packaging
+ Small compilation fix in outgoing.c for Windows builds
+
+
+Serf 0.6.0
+ Not released.
+
+
+Serf 0.5.0
+ Not released.
+
+
+Serf 0.4.0
+ WITHDRAWN: this release misstated itself as 0.5.0; use a later release
+
+ Provide authn framework, supporting Basic, Digest, Kerberos (SSPI, GSS),
+ along with proxy authn using Basic or Digest
+ Added experimental listener framework, along with test_server.c
+ Improvements and fixes to SSL support, including connection setup changes
+ Experimental support for unrequested, arriving ("async") responses
+ Experimental BWTP support using the async arrival feature
+ Headers are combined on read (not write), to ease certain classes of parsing
+ Experimental feature on aggregate buckets for a callback-on-empty
+ Fix the bucket allocator for when APR is using its pool debugging features
+ Proxy support in the serf_get testing utility
+ Fix to include the port number in the Host header
+ serf_get propagates errors from the response, instead of aborting (Issue 52)
+ Added serf_lib_version() for runtime version tests
+
+
+Serf 0.3.1 [2010-02-14, from /tags/0.3.1, r1322]
+ Fix loss of error on request->setup() callback. (Issue 47)
+ Support APR 2.x. (Issue 48)
+ Fixed slowdown in aggregate bucket with millions of child buckets
+ Avoid hang in apr_pollset_poll() by unclosed connections after fork()
+
+
+Serf 0.3.0 [2009-01-26, from /tags/0.3.0, r1217]
+ Support LTFLAGS override as a config-time env. variable (Issue 44)
+ Fix CUTest test harness compilation on Solaris (Issue 43)
+ Fix small race condition in OpenSSL initialization (Issue 39)
+ Handle content streams larger than 4GB on 32-bit OSes (Issue 41)
+ Fix test_ssl.c compilation with mingw+msys
+ Fix conn close segfault by explicitly closing conn when pool is destroyed
+ Expose the depth of the SSL certificate so the validator can use that info
+ Fix socket address family issue when opening a connection to a proxy
+ Provide new API to take snapshots of buckets
+ Implement snapshot API for simple and aggregate buckets
+ Build with bundled apr and apr-util VPATH builds
+ Build with bundled OpenSSL builds
+
+
+Serf 0.2.0 [2008-06-06, from /tags/0.2.0, r1189]
+ Enable use of external event loop: serf_create_context_ex
+ Enable adding new requests at the beginning of the request queue
+ Handle 'Connection:close' headers
+ Enable limiting the number of outstanding requests
+ Add readline function to simple buckets
+ Concatenate repeated headers using comma as separator, as per RFC 2616,
+ section 4.2. (Issue 29)
+ Add proxy server support
+ Add progress feedback support. (Issue 11)
+ Provide new API to simplify use of proxy and progress feedback support
+ Add callback to validate SSL server certificates. (Issue 31)
+ Add new test framework
+ Send current version string in the test programs (Issue 21)
+ Bugfixes:
+ Fix segfault with epoll when removing a NULL socket
+ Reset OpenSSL thread-safety callbacks when apr_terminate() called
+ Do not remove the socket from the pollset on pool cleanup
+ Do not issue double close on skt w/second one being close(-1) (Issue 33)
+
+
+Serf 0.1.2 [2007-06-18, from /tags/0.1.2, r1115]
+ Enable thread-safety with OpenSSL (Issue 19)
+ Teach serfmake to install headers into include/serf-0
+ Be more tolerant when servers close the connection without telling us
+ Do not open the connection until we have requests to deliver
+ Fix serfmake to produce the library that corresponds to the minor version
+ Fix a memory leak with the socket bucket (Issue 14)
+ Fix uninitialized branch in serf_spider (Issue 15)
+
+
+Serf 0.1.1 [2007-05-12, from /tags/0.1.1, r1105]
+ Add SSL client certificate support
+ Implement optimized iovec reads for header buckets
+ Fix up 'make clean' and 'make distclean' (Issues 9, 10)
+ Add SERF_VERSION_AT_LEAST macro
+ Remove abort() calls (Issue 13)
+
+
+Serf 0.1.0 [2006-12-14, from /tags/0.1.0, r1087]
+ Initial packaged release
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 0000000..ba50496
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,168 @@
+#
+# Makefile for Serf
+#
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+SERF_MAJOR_VERSION=@SERF_MAJOR_VERSION@
+SERF_DOTTED_VERSION=@SERF_DOTTED_VERSION@
+
+OBJECTS = buckets/aggregate_buckets.lo buckets/request_buckets.lo context.lo \
+ buckets/buckets.lo buckets/simple_buckets.lo buckets/file_buckets.lo \
+ buckets/mmap_buckets.lo buckets/socket_buckets.lo \
+ buckets/response_buckets.lo buckets/headers_buckets.lo \
+ buckets/allocator.lo buckets/dechunk_buckets.lo \
+ buckets/deflate_buckets.lo buckets/limit_buckets.lo \
+ buckets/ssl_buckets.lo buckets/barrier_buckets.lo \
+ buckets/chunk_buckets.lo buckets/bwtp_buckets.lo \
+ buckets/iovec_buckets.lo \
+ incoming.lo outgoing.lo ssltunnel.lo \
+ auth/auth.lo auth/auth_basic.lo auth/auth_digest.lo \
+ auth/auth_kerb.lo auth/auth_kerb_gss.lo
+
+TARGET_LIB=libserf-$(SERF_MAJOR_VERSION).la
+
+TEST_OBJECTS = test/serf_get.lo test/serf_response.lo test/serf_request.lo \
+ test/serf_spider.lo test/serf_server.lo test/serf_bwtp.lo
+
+TEST_SUITE_OBJECTS = test/CuTest.lo test/test_all.lo test/test_util.lo \
+ test/test_buckets.lo test/test_context.lo \
+ test/test_ssl.lo test/server/test_server.lo
+
+PROGRAMS = $(TEST_OBJECTS:.lo=) test/test_all
+
+TESTCASES = test/testcases/simple.response \
+ test/testcases/chunked-empty.response test/testcases/chunked.response \
+ test/testcases/chunked-trailers.response \
+ test/testcases/deflate.response
+
+HEADERS = serf.h serf_bucket_types.h serf_bucket_util.h
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+pkgconfigdir=$(libdir)/pkgconfig
+
+LIBTOOL = @APR_LIBTOOL@
+LTFLAGS = @LTFLAGS@ --tag=CC
+CC = @CC@
+CFLAGS = @EXTRA_CFLAGS@ @CFLAGS@
+CPPFLAGS = @EXTRA_CPPFLAGS@ @CPPFLAGS@
+INCLUDES = -I$(srcdir) @APR_INCLUDES@ @APU_INCLUDES@ @EXTRA_INCLUDES@
+MKDIR = @mkdir_p@
+INSTALL = @INSTALL@
+
+LDFLAGS = @EXTRA_LDFLAGS@ @LDFLAGS@
+LIBS = @EXTRA_LIBS@ @SERF_LIBS@ -lz -lssl -lcrypto
+
+all: $(TARGET_LIB) $(PROGRAMS)
+
+context.lo: context.c $(HEADERS)
+incoming.lo: incoming.c $(HEADERS)
+outgoing.lo: outgoing.c $(HEADERS)
+ssltunnel.lo: ssltunnel.c $(HEADERS)
+buckets/aggregate_buckets.lo: buckets/aggregate_buckets.c $(HEADERS)
+buckets/request_buckets.lo: buckets/request_buckets.c $(HEADERS)
+buckets/buckets.lo: buckets/buckets.c $(HEADERS)
+buckets/simple_buckets.lo: buckets/simple_buckets.c $(HEADERS)
+buckets/file_buckets.lo: buckets/file_buckets.c $(HEADERS)
+buckets/mmap_buckets.lo: buckets/mmap_buckets.c $(HEADERS)
+buckets/socket_buckets.lo: buckets/socket_buckets.c $(HEADERS)
+buckets/response_buckets.lo: buckets/response_buckets.c $(HEADERS)
+buckets/headers_buckets.lo: buckets/headers_buckets.c $(HEADERS)
+buckets/allocator.lo: buckets/allocator.c $(HEADERS)
+buckets/dechunk_buckets.lo: buckets/dechunk_buckets.c $(HEADERS)
+buckets/deflate_buckets.lo: buckets/deflate_buckets.c $(HEADERS)
+buckets/limit_buckets.lo: buckets/limit_buckets.c $(HEADERS)
+buckets/ssl_buckets.lo: buckets/ssl_buckets.c $(HEADERS)
+buckets/barrier_buckets.lo: buckets/barrier_buckets.c $(HEADERS)
+buckets/chunk_buckets.lo: buckets/chunk_buckets.c $(HEADERS)
+buckets/bwtp_buckets.lo: buckets/bwtp_buckets.c $(HEADERS)
+buckets/iovec_buckets.lo: buckets/iovec_buckets.c $(HEADERS)
+
+test/serf_get.lo: test/serf_get.c $(HEADERS)
+test/serf_response.lo: test/serf_response.c $(HEADERS)
+test/serf_request.lo: test/serf_request.c $(HEADERS)
+test/serf_server.lo: test/serf_server.c $(HEADERS)
+test/serf_spider.lo: test/serf_spider.c $(HEADERS)
+test/serf_bwtp.lo: test/serf_bwtp.c $(HEADERS)
+
+test/CuTest.lo: test/CuTest.c $(HEADERS)
+test/test_all.lo: test/test_all.c $(HEADERS)
+test/test_util.lo: test/test_util.c $(HEADERS)
+test/test_buckets.lo: test/test_buckets.c $(HEADERS)
+test/test_context.lo: test/test_context.c $(HEADERS)
+test/test_ssl.lo: test/test_ssl.c $(HEADERS)
+
+$(TARGET_LIB): $(OBJECTS)
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -rpath $(libdir) -o $@ $(OBJECTS) $(LIBS)
+
+test/serf_get: $(TARGET_LIB) test/serf_get.lo
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_get.lo $(LIBS)
+
+test/serf_response: $(TARGET_LIB) test/serf_response.lo
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_response.lo $(LIBS)
+
+test/serf_request: $(TARGET_LIB) test/serf_request.lo
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_request.lo $(LIBS)
+
+test/serf_server: $(TARGET_LIB) test/serf_server.lo
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_server.lo $(LIBS)
+
+test/serf_spider: $(TARGET_LIB) test/serf_spider.lo
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_spider.lo $(LIBS)
+
+test/serf_bwtp: $(TARGET_LIB) test/serf_bwtp.lo
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) test/serf_bwtp.lo $(LIBS)
+
+test/test_all: $(TARGET_LIB) $(TEST_SUITE_OBJECTS)
+ $(LIBTOOL) $(LTFLAGS) --mode=link $(CC) $(LDFLAGS) -static -o $@ $(TARGET_LIB) $(TEST_SUITE_OBJECTS) $(LIBS)
+
+check: test/serf_response test/test_all
+ @for i in $(TESTCASES); \
+ do echo "== Testing $$i =="; \
+ ./test/serf_response $(srcdir)/$$i; \
+ done;
+ @echo "== Running test_all ==";
+ @./test/test_all
+
+install: $(TARGET_LIB)
+ $(MKDIR) $(DESTDIR)$(libdir) $(DESTDIR)$(includedir) $(DESTDIR)$(pkgconfigdir)
+ $(LIBTOOL) $(LTFLAGS) --mode=install $(INSTALL) -m 644 $(TARGET_LIB) $(DESTDIR)$(libdir)
+ for i in $(srcdir)/*.h; do \
+ $(INSTALL) -m 644 $$i $(DESTDIR)$(includedir); \
+ done
+ $(INSTALL) -m 644 serf.pc $(DESTDIR)$(pkgconfigdir)/serf-$(SERF_MAJOR_VERSION).pc
+
+clean:
+ rm -f $(TARGET_LIB) $(OBJECTS) $(OBJECTS:.lo=.o) $(PROGRAMS) $(TEST_OBJECTS) $(TEST_OBJECTS:.lo=.o) $(TEST_SUITE_OBJECTS) $(TEST_SUITE_OBJECTS:.lo=.o)
+ for subdir in . buckets test; do \
+ (cd $$subdir && rm -rf .libs) ; \
+ done
+
+distclean: clean
+ rm -f Makefile serf.pc config.log config.status
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o
+
+mkdir-vpath:
+ @if [ ! -d auth ]; then \
+ $(MKDIR) auth; \
+ fi;
+ @if [ ! -d buckets ]; then \
+ $(MKDIR) buckets; \
+ fi;
+ @if [ ! -d test ]; then \
+ $(MKDIR) test; \
+ fi;
+ @if [ ! -d test/server ]; then \
+ $(MKDIR) test/server; \
+ fi;
+ @if [ ! -r test/serftestca.pem ]; then \
+ cp -f $(srcdir)/test/serftestca.pem test/; \
+ fi;
+
+.c.lo:
+ $(LIBTOOL) $(LTFLAGS) --mode=compile $(CC) $(INCLUDES) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< && touch $@
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..3f59805
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,2 @@
+This product includes software developed by
+The Apache Software Foundation (http://www.apache.org/).
diff --git a/README b/README
new file mode 100644
index 0000000..cca0394
--- /dev/null
+++ b/README
@@ -0,0 +1,38 @@
+Welcome to serf, a high-performance asynchronous HTTP client library.
+
+The serf library is a C-based HTTP client library built upon the Apache
+Portable Runtime (APR) library. It multiplexes connections, running the
+read/write communication asynchronously. Memory copies and transformations are
+kept to a minimum to provide high performance operation.
+
+ * Status: http://code.google.com/p/serf/wiki/
+ * Site: http://code.google.com/p/serf/
+ * Code: http://serf.googlecode.com/svn/
+ * Issues: http://code.google.com/p/serf/issues/list
+ * Mail: serf-dev@googlegroups.com
+ * People: Justin Erenkrantz, Greg Stein
+
+----
+
+Quick guide for the impatient
+
+ (Unix)
+ % ./configure
+ % make
+ % make install
+
+----
+
+Building serf from a Subversion checkout (non-packaged releases)
+
+We suggest that you try out 'serfmake'.
+
+ % ./serfmake --prefix=/usr/local/serf --with-apr=/usr/local/apr install
+
+If you want to use the autoconf build system and are using a Subversion
+checkout, you need to run buildconf and have APR and APR-util sources handy.
+
+ % ./buildconf --with-apr=/path/to/apr --with-apr-util=/path/to/apr-util
+ (By default, buildconf will look in . and ../ for apr and apr-util.)
+
+Then, you can use ./configure, make, etc.
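
For orientation alongside the build notes above, here is a rough sketch of what a minimal serf client looks like in C, loosely modeled on test/serf_get.c added later in this commit. The helper names (accept_response, handle_response, setup_request, conn_setup, conn_closed), the host name, and the single global "done" flag are illustrative only; the callback signatures follow serf.h as shipped in this revision, but treat this as a sketch and verify it against your build.

    /* Sketch only: one GET against www.example.com, most error handling
     * omitted.  Helper names and the host are made up; callback
     * signatures follow serf.h in this import. */
    #include <stdio.h>
    #include <apr_pools.h>
    #include <apr_network_io.h>
    #include "serf.h"

    static int done = 0;                 /* set once the response hits EOF */

    static serf_bucket_t *accept_response(serf_request_t *request,
                                          serf_bucket_t *stream,
                                          void *acceptor_baton, apr_pool_t *pool)
    {
        /* Wrap the raw connection stream in a response-parsing bucket. */
        return serf_bucket_response_create(stream,
                                           serf_request_get_alloc(request));
    }

    static apr_status_t handle_response(serf_request_t *request,
                                        serf_bucket_t *response,
                                        void *handler_baton, apr_pool_t *pool)
    {
        const char *data;
        apr_size_t len;
        apr_status_t status;

        do {                             /* drain whatever is available now */
            status = serf_bucket_read(response, 8192, &data, &len);
            if (SERF_BUCKET_READ_ERROR(status))
                return status;
            if (len)
                fwrite(data, 1, len, stdout);
        } while (status == APR_SUCCESS);

        if (APR_STATUS_IS_EOF(status))
            done = 1;                    /* whole response consumed */
        return status;                   /* APR_EAGAIN means "call me again" */
    }

    static apr_status_t setup_request(serf_request_t *request, void *setup_baton,
                                      serf_bucket_t **req_bkt,
                                      serf_response_acceptor_t *acceptor,
                                      void **acceptor_baton,
                                      serf_response_handler_t *handler,
                                      void **handler_baton, apr_pool_t *pool)
    {
        serf_bucket_alloc_t *alloc = serf_request_get_alloc(request);
        serf_bucket_t *hdrs;

        *req_bkt = serf_bucket_request_create("GET", "/", NULL, alloc);
        hdrs = serf_bucket_request_get_headers(*req_bkt);
        serf_bucket_headers_setn(hdrs, "Host", "www.example.com");
        *acceptor = accept_response;
        *handler = handle_response;
        return APR_SUCCESS;
    }

    static apr_status_t conn_setup(apr_socket_t *skt, serf_bucket_t **read_bkt,
                                   serf_bucket_t **write_bkt, void *setup_baton,
                                   apr_pool_t *pool)
    {
        /* setup_baton carries the bucket allocator created in main(). */
        *read_bkt = serf_bucket_socket_create(skt, setup_baton);
        return APR_SUCCESS;
    }

    static void conn_closed(serf_connection_t *conn, void *closed_baton,
                            apr_status_t why, apr_pool_t *pool)
    {
        /* nothing to clean up in this sketch */
    }

    int main(void)
    {
        apr_pool_t *pool;
        apr_sockaddr_t *address;
        serf_context_t *ctx;
        serf_connection_t *conn;
        serf_bucket_alloc_t *alloc;

        apr_initialize();
        apr_pool_create(&pool, NULL);
        alloc = serf_bucket_allocator_create(pool, NULL, NULL);

        apr_sockaddr_info_get(&address, "www.example.com", APR_UNSPEC, 80, 0, pool);
        ctx = serf_context_create(pool);
        conn = serf_connection_create(ctx, address, conn_setup, alloc,
                                      conn_closed, NULL, pool);
        serf_connection_request_create(conn, setup_request, NULL);

        while (!done) {                  /* pump the event loop */
            apr_status_t status = serf_context_run(ctx, SERF_DURATION_FOREVER, pool);
            if (status != APR_SUCCESS && !APR_STATUS_IS_TIMEUP(status))
                break;
        }

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }

Compile it against APR, APR-util and the installed libserf (for example via the serf-1.pc pkg-config file that 'make install' below puts in place).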
diff --git a/auth/auth.c b/auth/auth.c
new file mode 100644
index 0000000..13f822b
--- /dev/null
+++ b/auth/auth.c
@@ -0,0 +1,374 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "serf.h"
+#include "serf_private.h"
+#include "auth.h"
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+static apr_status_t
+default_auth_response_handler(int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+static const serf__authn_scheme_t serf_authn_schemes[] = {
+ {
+ 401,
+ "Basic",
+ SERF_AUTHN_BASIC,
+ serf__init_basic,
+ serf__init_basic_connection,
+ serf__handle_basic_auth,
+ serf__setup_request_basic_auth,
+ default_auth_response_handler,
+ },
+ {
+ 407,
+ "Basic",
+ SERF_AUTHN_BASIC,
+ serf__init_basic,
+ serf__init_basic_connection,
+ serf__handle_basic_auth,
+ serf__setup_request_basic_auth,
+ default_auth_response_handler,
+ },
+ {
+ 401,
+ "Digest",
+ SERF_AUTHN_DIGEST,
+ serf__init_digest,
+ serf__init_digest_connection,
+ serf__handle_digest_auth,
+ serf__setup_request_digest_auth,
+ serf__validate_response_digest_auth,
+ },
+ {
+ 407,
+ "Digest",
+ SERF_AUTHN_DIGEST,
+ serf__init_digest,
+ serf__init_digest_connection,
+ serf__handle_digest_auth,
+ serf__setup_request_digest_auth,
+ serf__validate_response_digest_auth,
+ },
+#ifdef SERF_HAVE_KERB
+ {
+ 401,
+ "Negotiate",
+ SERF_AUTHN_NEGOTIATE,
+ serf__init_kerb,
+ serf__init_kerb_connection,
+ serf__handle_kerb_auth,
+ serf__setup_request_kerb_auth,
+ serf__validate_response_kerb_auth,
+ },
+#endif
+ /* ADD NEW AUTHENTICATION IMPLEMENTATIONS HERE (as they're written) */
+
+ /* sentinel */
+ { 0 }
+};
+
+
+/**
+ * Baton passed to the response header callback function
+ */
+typedef struct {
+ int code;
+ apr_status_t status;
+ const char *header;
+ serf_request_t *request;
+ serf_bucket_t *response;
+ void *baton;
+ apr_pool_t *pool;
+ const serf__authn_scheme_t *scheme;
+ const char *last_scheme_name;
+} auth_baton_t;
+
+/* Reads and discards all bytes in the response body. */
+static apr_status_t discard_body(serf_bucket_t *response)
+{
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ while (1) {
+ status = serf_bucket_read(response, SERF_READ_ALL_AVAIL, &data, &len);
+
+ if (status) {
+ return status;
+ }
+
+ /* feed me */
+ }
+}
+
+/**
+ * handle_auth_header is called for each header in the response. It filters
+ * out the Authenticate headers (WWW or Proxy depending on what's needed) and
+ * tries to find a matching scheme handler.
+ *
+ * Returns a non-0 value if a matching handler was found.
+ */
+static int handle_auth_header(void *baton,
+ const char *key,
+ const char *header)
+{
+ auth_baton_t *ab = baton;
+ int scheme_found = FALSE;
+ const char *auth_name;
+ const char *auth_attr;
+ const serf__authn_scheme_t *scheme = NULL;
+ serf_connection_t *conn = ab->request->conn;
+ serf_context_t *ctx = conn->ctx;
+
+ /* We're only interested in xxxx-Authenticate headers. */
+ if (strcmp(key, ab->header) != 0)
+ return 0;
+
+ /* Extract the authentication scheme name, and prepare for reading
+ the attributes. */
+ auth_attr = strchr(header, ' ');
+ if (auth_attr) {
+ auth_name = apr_pstrmemdup(ab->pool, header, auth_attr - header);
+ ++auth_attr;
+ }
+ else
+ auth_name = header;
+
+ ab->last_scheme_name = auth_name;
+
+ /* Find the matching authentication handler.
+ Note that we don't reuse the auth scheme stored in the context,
+ as that may have changed. (ex. fallback from ntlm to basic.) */
+ for (scheme = serf_authn_schemes; scheme->code != 0; ++scheme) {
+ if (ab->code == scheme->code &&
+ strcmp(auth_name, scheme->name) == 0 &&
+ ctx->authn_types & scheme->type) {
+ serf__auth_handler_func_t handler = scheme->handle_func;
+ apr_status_t status = 0;
+
+ /* If this is the first time we use this scheme on this connection,
+ make sure to initialize the authentication handler first. */
+ if (ab->code == 401 && ctx->authn_info.scheme != scheme) {
+ status = scheme->init_ctx_func(ab->code, ctx, ctx->pool);
+ if (!status) {
+ status = scheme->init_conn_func(ab->code, conn, conn->pool);
+
+ if (!status)
+ ctx->authn_info.scheme = scheme;
+ else
+ ctx->authn_info.scheme = NULL;
+ }
+ }
+ else if (ab->code == 407 && ctx->proxy_authn_info.scheme != scheme) {
+ status = scheme->init_ctx_func(ab->code, ctx, ctx->pool);
+ if (!status) {
+ status = scheme->init_conn_func(ab->code, conn, conn->pool);
+
+ if (!status)
+ ctx->proxy_authn_info.scheme = scheme;
+ else
+ ctx->proxy_authn_info.scheme = NULL;
+ }
+ }
+
+ if (!status) {
+ scheme_found = TRUE;
+ ab->scheme = scheme;
+ status = handler(ab->code, ab->request, ab->response,
+ header, auth_attr, ab->baton, ctx->pool);
+ }
+
+ /* If the authentication fails, cache the error for now. Try the
+ next available scheme. If there's none raise the error. */
+ if (status) {
+ scheme_found = FALSE;
+ scheme = NULL;
+ }
+ /* Let the caller know if the authentication setup was successful
+ or not. */
+ ab->status = status;
+
+ break;
+ }
+ }
+
+ /* If a matching scheme handler was found, we can stop iterating
+ over the response headers - so return a non-0 value. */
+ return scheme_found;
+}
+
+/* Dispatch authentication handling. This function matches the possible
+ authentication mechanisms with those available. Server and proxy
+ authentication are evaluated separately. */
+static apr_status_t dispatch_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *hdrs;
+
+ if (code == 401 || code == 407) {
+ auth_baton_t ab = { 0 };
+ const char *auth_hdr;
+
+ ab.code = code;
+ ab.status = APR_SUCCESS;
+ ab.request = request;
+ ab.response = response;
+ ab.baton = baton;
+ ab.pool = pool;
+
+ /* Before iterating over all authn headers, check if there are any. */
+ if (code == 401)
+ ab.header = "WWW-Authenticate";
+ else
+ ab.header = "Proxy-Authenticate";
+
+ hdrs = serf_bucket_response_get_headers(response);
+ auth_hdr = serf_bucket_headers_get(hdrs, ab.header);
+
+ if (!auth_hdr) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+
+ /* Iterate over all headers. Try to find a matching authentication scheme
+ handler.
+
+ Note: it is possible to have multiple WWW-/Proxy-Authenticate headers. We do
+ not want to combine them (per normal header combination rules) as that
+ would make it hard to parse. Instead, we want to individually parse
+ and handle each header in the response, looking for one that we can
+ work with.
+ */
+ serf_bucket_headers_do(hdrs,
+ handle_auth_header,
+ &ab);
+ if (ab.status != APR_SUCCESS)
+ return ab.status;
+
+ if (!ab.scheme || ab.scheme->name == NULL) {
+ /* No matching authentication found. */
+ return SERF_ERROR_AUTHN_NOT_SUPPORTED;
+ }
+ } else {
+ /* Validate the response authn headers if needed. */
+
+ }
+
+ return APR_SUCCESS;
+}
+
+/* Read the headers of the response and try the available
+ handlers if authentication or validation is needed. */
+apr_status_t serf__handle_auth_response(int *consumed_response,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *baton,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ serf_status_line sl;
+
+ *consumed_response = 0;
+
+ status = serf_bucket_response_status(response, &sl);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ if (!sl.version && (APR_STATUS_IS_EOF(status) ||
+ APR_STATUS_IS_EAGAIN(status))) {
+ return status;
+ }
+
+ status = serf_bucket_response_wait_for_headers(response);
+ if (status) {
+ if (!APR_STATUS_IS_EOF(status)) {
+ return status;
+ }
+
+ /* If status is APR_EOF, there were no headers to read.
+ This can be ok in some situations, and it definitely
+ means there's no authentication requested now. */
+ return APR_SUCCESS;
+ }
+
+ if (sl.code == 401 || sl.code == 407) {
+ /* Authentication requested. */
+
+ /* Don't bother handling the authentication request if the response
+ wasn't received completely yet. Serf will call serf__handle_auth_response
+ again when more data is received. */
+ status = discard_body(response);
+ *consumed_response = 1;
+
+ /* Discard all response body before processing authentication. */
+ if (!APR_STATUS_IS_EOF(status)) {
+ return status;
+ }
+
+ status = dispatch_auth(sl.code, request, response, baton, pool);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ /* Requeue the request with the necessary auth headers. */
+ /* ### Application doesn't know about this request! */
+ serf_connection_priority_request_create(request->conn,
+ request->setup,
+ request->setup_baton);
+
+ return APR_EOF;
+ }
+
+ return APR_SUCCESS;
+}
+
+/**
+ * base64 encode the authentication data and build an authentication
+ * header in this format:
+ * [SCHEME] [BASE64 of auth DATA]
+ */
+void serf__encode_auth_header(const char **header,
+ const char *scheme,
+ const char *data, apr_size_t data_len,
+ apr_pool_t *pool)
+{
+ apr_size_t encoded_len, scheme_len;
+ char *ptr;
+
+ encoded_len = apr_base64_encode_len(data_len);
+ scheme_len = strlen(scheme);
+
+ ptr = apr_palloc(pool, encoded_len + scheme_len + 1);
+ *header = ptr;
+
+ apr_cpystrn(ptr, scheme, scheme_len + 1);
+ ptr += scheme_len;
+ *ptr++ = ' ';
+
+ apr_base64_encode(ptr, data, data_len);
+}
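
The header built by serf__encode_auth_header() above is simply the scheme name, a space, and the base64 of the raw credential data. A tiny stand-alone illustration (not part of the library) using apr_base64 directly, with made-up credentials:

    /* Illustration only: reproduces the "[SCHEME] [BASE64 of auth DATA]"
     * format built by serf__encode_auth_header().  Credentials are made up. */
    #include <stdio.h>
    #include <string.h>
    #include <apr_pools.h>
    #include <apr_base64.h>

    int main(void)
    {
        apr_pool_t *pool;
        const char *creds = "user:pass";          /* "<username>:<password>" */
        char *encoded;

        apr_initialize();
        apr_pool_create(&pool, NULL);

        encoded = apr_palloc(pool, apr_base64_encode_len(strlen(creds)));
        apr_base64_encode(encoded, creds, strlen(creds));

        /* Prints: Authorization: Basic dXNlcjpwYXNz */
        printf("Authorization: Basic %s\n", encoded);

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }

For "user:pass" this prints "Authorization: Basic dXNlcjpwYXNz", which is exactly the value the Basic handler in auth_basic.c below ends up caching.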
diff --git a/auth/auth.h b/auth/auth.h
new file mode 100644
index 0000000..1f2c751
--- /dev/null
+++ b/auth/auth.h
@@ -0,0 +1,105 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUTH_H
+#define AUTH_H
+
+#include "auth_kerb.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void serf__encode_auth_header(const char **header, const char *protocol,
+ const char *data, apr_size_t data_len,
+ apr_pool_t *pool);
+
+/** Basic authentication **/
+apr_status_t serf__init_basic(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool);
+apr_status_t serf__init_basic_connection(int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+apr_status_t serf__handle_basic_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+apr_status_t serf__setup_request_basic_auth(int code,
+ serf_connection_t *conn,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+
+/** Digest authentication **/
+apr_status_t serf__init_digest(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool);
+apr_status_t serf__init_digest_connection(int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+apr_status_t serf__handle_digest_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+apr_status_t serf__setup_request_digest_auth(int code,
+ serf_connection_t *conn,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+apr_status_t serf__validate_response_digest_auth(int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool);
+
+#ifdef SERF_HAVE_KERB
+/** Kerberos authentication **/
+apr_status_t serf__init_kerb(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool);
+apr_status_t serf__init_kerb_connection(int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+apr_status_t serf__handle_kerb_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+apr_status_t serf__setup_request_kerb_auth(int code,
+ serf_connection_t *conn,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+apr_status_t serf__validate_response_kerb_auth(int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool);
+#endif /* SERF_HAVE_KERB */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !AUTH_H */
diff --git a/auth/auth_basic.c b/auth/auth_basic.c
new file mode 100644
index 0000000..b876cb8
--- /dev/null
+++ b/auth/auth_basic.c
@@ -0,0 +1,155 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*** Basic authentication ***/
+
+#include <serf.h>
+#include <serf_private.h>
+#include <auth/auth.h>
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+typedef struct basic_authn_info_t {
+ const char *header;
+ const char *value;
+} basic_authn_info_t;
+
+apr_status_t
+serf__handle_basic_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool)
+{
+ const char *tmp;
+ apr_size_t tmp_len;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info = (code == 401) ? &ctx->authn_info :
+ &ctx->proxy_authn_info;
+ basic_authn_info_t *basic_info = authn_info->baton;
+ apr_status_t status;
+ apr_pool_t *cred_pool;
+ char *username, *password;
+
+ /* Can't do Basic authentication if there's no callback to get
+ username & password. */
+ if (!ctx->cred_cb) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+
+ if (!authn_info->realm) {
+ char *realm_name = NULL;
+ const char *eq = strchr(auth_attr, '=');
+
+ if (eq && strncasecmp(auth_attr, "realm", 5) == 0) {
+ realm_name = apr_pstrdup(pool, eq + 1);
+ if (realm_name[0] == '\"') {
+ apr_size_t realm_len;
+
+ realm_len = strlen(realm_name);
+ if (realm_name[realm_len - 1] == '\"') {
+ realm_name[realm_len - 1] = '\0';
+ realm_name++;
+ }
+ }
+ }
+
+ if (!realm_name) {
+ return SERF_ERROR_AUTHN_MISSING_ATTRIBUTE;
+ }
+
+ authn_info->realm = apr_psprintf(conn->pool, "<%s://%s:%d> %s",
+ conn->host_info.scheme,
+ conn->host_info.hostname,
+ conn->host_info.port,
+ realm_name);
+ }
+
+ /* Ask the application for credentials */
+ apr_pool_create(&cred_pool, pool);
+ status = (*ctx->cred_cb)(&username, &password, request, baton,
+ code, authn_info->scheme->name,
+ authn_info->realm, cred_pool);
+ if (status) {
+ apr_pool_destroy(cred_pool);
+ return status;
+ }
+
+ tmp = apr_pstrcat(conn->pool, username, ":", password, NULL);
+ tmp_len = strlen(tmp);
+ apr_pool_destroy(cred_pool);
+
+ serf__encode_auth_header(&basic_info->value,
+ authn_info->scheme->name,
+ tmp, tmp_len, pool);
+ basic_info->header = (code == 401) ? "Authorization" : "Proxy-Authorization";
+
+ return APR_SUCCESS;
+}
+
+/* For Basic authentication we expect all authn info to be the same for all
+ connections in the context (same realm, username, password). Therefore we
+ can keep the header value in the context instead of per connection. */
+apr_status_t
+serf__init_basic(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool)
+{
+ if (code == 401) {
+ ctx->authn_info.baton = apr_pcalloc(pool, sizeof(basic_authn_info_t));
+ } else {
+ ctx->proxy_authn_info.baton = apr_pcalloc(pool, sizeof(basic_authn_info_t));
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_basic_connection(int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__setup_request_basic_auth(int code,
+ serf_connection_t *conn,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt)
+{
+ serf_context_t *ctx = conn->ctx;
+ basic_authn_info_t *authn_info;
+
+ if (code == 401) {
+ authn_info = ctx->authn_info.baton;
+ } else {
+ authn_info = ctx->proxy_authn_info.baton;
+ }
+
+ if (authn_info && authn_info->header && authn_info->value) {
+ serf_bucket_headers_setn(hdrs_bkt, authn_info->header,
+ authn_info->value);
+ return APR_SUCCESS;
+ }
+
+ return SERF_ERROR_AUTHN_FAILED;
+}
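
The other half of the exchange above lives in the application: serf asks it for a username and password through a credentials callback registered on the context. A sketch of such a callback follows, with made-up credentials; the callback shape and the two serf_config_* registration calls are taken from serf.h in this revision, but verify them against your build.

    /* Sketch of an application-side credentials callback for the 401/407
     * handling above.  Credentials are made up. */
    #include <apr_strings.h>
    #include "serf.h"

    static apr_status_t my_credentials(char **username, char **password,
                                       serf_request_t *request, void *baton,
                                       int code, const char *authn_type,
                                       const char *realm, apr_pool_t *pool)
    {
        /* `realm` arrives in the "<scheme://host:port> realm-name" form built
           by serf__handle_basic_auth() above; a real application would prompt
           the user or look the pair up in a store keyed on that string. */
        *username = apr_pstrdup(pool, "user");
        *password = apr_pstrdup(pool, "pass");
        return APR_SUCCESS;
    }

    /* Registration, typically right after serf_context_create():
     *
     *   serf_config_authn_types(ctx, SERF_AUTHN_BASIC | SERF_AUTHN_DIGEST);
     *   serf_config_credentials_callback(ctx, my_credentials);
     */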
diff --git a/auth/auth_digest.c b/auth/auth_digest.c
new file mode 100644
index 0000000..2e29180
--- /dev/null
+++ b/auth/auth_digest.c
@@ -0,0 +1,475 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*** Digest authentication ***/
+
+#include <serf.h>
+#include <serf_private.h>
+#include <auth/auth.h>
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+#include <apr_uuid.h>
+#include <apr_md5.h>
+
+/** Digest authentication, implements RFC 2617. **/
+
+/* Stores the context information related to Digest authentication.
+ The context is per connection. */
+typedef struct digest_authn_info_t {
+ /* nonce-count for digest authentication */
+ unsigned int digest_nc;
+
+ const char *header;
+
+ const char *ha1;
+
+ const char *realm;
+ const char *cnonce;
+ const char *nonce;
+ const char *opaque;
+ const char *algorithm;
+ const char *qop;
+ const char *username;
+
+ apr_pool_t *pool;
+} digest_authn_info_t;
+
+static char
+int_to_hex(int v)
+{
+ return (v < 10) ? '0' + v : 'a' + (v - 10);
+}
+
+/**
+ * Convert APR_MD5_DIGESTSIZE bytes of data in HASHVAL to their hexadecimal
+ * representation.
+ *
+ * The returned string will be allocated in the POOL and be null-terminated.
+ */
+static const char *
+hex_encode(const unsigned char *hashval,
+ apr_pool_t *pool)
+{
+ int i;
+ char *hexval = apr_palloc(pool, (APR_MD5_DIGESTSIZE * 2) + 1);
+ for (i = 0; i < APR_MD5_DIGESTSIZE; i++) {
+ hexval[2 * i] = int_to_hex((hashval[i] >> 4) & 0xf);
+ hexval[2 * i + 1] = int_to_hex(hashval[i] & 0xf);
+ }
+ hexval[APR_MD5_DIGESTSIZE * 2] = '\0';
+ return hexval;
+}
+
+/**
+ * Returns a random 32-character hex string to use as the client nonce,
+ * derived from a freshly generated UUID (00112233-4455-6677-8899-AABBCCDDEEFF).
+ *
+ * The returned string will be allocated in the POOL and be null-terminated.
+ */
+static const char *
+random_cnonce(apr_pool_t *pool)
+{
+ apr_uuid_t uuid;
+ char *buf = apr_palloc(pool, APR_UUID_FORMATTED_LENGTH + 1);
+
+ apr_uuid_get(&uuid);
+ apr_uuid_format(buf, &uuid);
+
+ return hex_encode((unsigned char*)buf, pool);
+}
+
+static const char *
+build_digest_ha1(const char *username,
+ const char *password,
+ const char *realm_name,
+ apr_pool_t *pool)
+{
+ const char *tmp;
+ unsigned char ha1[APR_MD5_DIGESTSIZE];
+ apr_status_t status;
+
+ /* calculate ha1:
+ MD5 hash of the combined user name, authentication realm and password */
+ tmp = apr_psprintf(pool, "%s:%s:%s",
+ username,
+ realm_name,
+ password);
+ status = apr_md5(ha1, tmp, strlen(tmp));
+
+ return hex_encode(ha1, pool);
+}
+
+static const char *
+build_digest_ha2(const char *uri,
+ const char *method,
+ const char *qop,
+ apr_pool_t *pool)
+{
+ if (!qop || strcmp(qop, "auth") == 0) {
+ const char *tmp;
+ unsigned char ha2[APR_MD5_DIGESTSIZE];
+ apr_status_t status;
+
+ /* calculate ha2:
+ MD5 hash of the combined method and URI */
+ tmp = apr_psprintf(pool, "%s:%s",
+ method,
+ uri);
+ status = apr_md5(ha2, tmp, strlen(tmp));
+
+ return hex_encode(ha2, pool);
+ } else {
+ /* TODO: auth-int isn't supported! */
+ }
+
+ return NULL;
+}
+
+static const char *
+build_auth_header(digest_authn_info_t *digest_info,
+ const char *path,
+ const char *method,
+ apr_pool_t *pool)
+{
+ char *hdr;
+ const char *ha2;
+ const char *response;
+ unsigned char response_hdr[APR_MD5_DIGESTSIZE];
+ const char *response_hdr_hex;
+ apr_status_t status;
+
+ ha2 = build_digest_ha2(path, method, digest_info->qop, pool);
+
+ hdr = apr_psprintf(pool,
+ "Digest realm=\"%s\","
+ " username=\"%s\","
+ " nonce=\"%s\","
+ " uri=\"%s\"",
+ digest_info->realm, digest_info->username,
+ digest_info->nonce,
+ path);
+
+ if (digest_info->qop) {
+ if (! digest_info->cnonce)
+ digest_info->cnonce = random_cnonce(digest_info->pool);
+
+ hdr = apr_psprintf(pool, "%s, nc=%08x, cnonce=\"%s\", qop=\"%s\"",
+ hdr,
+ digest_info->digest_nc,
+ digest_info->cnonce,
+ digest_info->qop);
+
+ /* Build the response header:
+ MD5 hash of the combined HA1 result, server nonce (nonce),
+ request counter (nc), client nonce (cnonce),
+ quality of protection code (qop) and HA2 result. */
+ response = apr_psprintf(pool, "%s:%s:%08x:%s:%s:%s",
+ digest_info->ha1, digest_info->nonce,
+ digest_info->digest_nc,
+ digest_info->cnonce, digest_info->qop, ha2);
+ } else {
+ /* Build the response header:
+ MD5 hash of the combined HA1 result, server nonce (nonce)
+ and HA2 result. */
+ response = apr_psprintf(pool, "%s:%s:%s",
+ digest_info->ha1, digest_info->nonce, ha2);
+ }
+
+ status = apr_md5(response_hdr, response, strlen(response));
+ response_hdr_hex = hex_encode(response_hdr, pool);
+
+ hdr = apr_psprintf(pool, "%s, response=\"%s\"", hdr, response_hdr_hex);
+
+ if (digest_info->opaque) {
+ hdr = apr_psprintf(pool, "%s, opaque=\"%s\"", hdr,
+ digest_info->opaque);
+ }
+ if (digest_info->algorithm) {
+ hdr = apr_psprintf(pool, "%s, algorithm=\"%s\"", hdr,
+ digest_info->algorithm);
+ }
+
+ return hdr;
+}
+
+apr_status_t
+serf__handle_digest_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool)
+{
+ char *attrs;
+ char *nextkv;
+ const char *realm_name = NULL;
+ const char *nonce = NULL;
+ const char *algorithm = NULL;
+ const char *qop = NULL;
+ const char *opaque = NULL;
+ const char *key;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info = (code == 401) ? &ctx->authn_info :
+ &ctx->proxy_authn_info;
+ digest_authn_info_t *digest_info = (code == 401) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+ apr_status_t status;
+ apr_pool_t *cred_pool;
+ char *username, *password;
+
+ /* Can't do Digest authentication if there's no callback to get
+ username & password. */
+ if (!ctx->cred_cb) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+
+ /* Need a copy cuz we're going to write NUL characters into the string. */
+ attrs = apr_pstrdup(pool, auth_attr);
+
+ /* We're expecting a list of key=value pairs, separated by a comma.
+ Ex. realm="SVN Digest",
+ nonce="f+zTl/leBAA=e371bd3070adfb47b21f5fc64ad8cc21adc371a5",
+ algorithm=MD5, qop="auth" */
+ for ( ; (key = apr_strtok(attrs, ",", &nextkv)) != NULL; attrs = NULL) {
+ char *val;
+
+ val = strchr(key, '=');
+ if (val == NULL)
+ continue;
+ *val++ = '\0';
+
+ /* skip leading spaces */
+ while (*key && *key == ' ')
+ key++;
+
+ /* If the value is quoted, then remove the quotes. */
+ if (*val == '"') {
+ apr_size_t last = strlen(val) - 1;
+
+ if (val[last] == '"') {
+ val[last] = '\0';
+ val++;
+ }
+ }
+
+ if (strcmp(key, "realm") == 0)
+ realm_name = val;
+ else if (strcmp(key, "nonce") == 0)
+ nonce = val;
+ else if (strcmp(key, "algorithm") == 0)
+ algorithm = val;
+ else if (strcmp(key, "qop") == 0)
+ qop = val;
+ else if (strcmp(key, "opaque") == 0)
+ opaque = val;
+
+ /* Ignore all unsupported attributes. */
+ }
+
+ if (!realm_name) {
+ return SERF_ERROR_AUTHN_MISSING_ATTRIBUTE;
+ }
+
+ authn_info->realm = apr_psprintf(conn->pool, "<%s://%s:%d> %s",
+ conn->host_info.scheme,
+ conn->host_info.hostname,
+ conn->host_info.port,
+ realm_name);
+
+ /* Ask the application for credentials */
+ apr_pool_create(&cred_pool, pool);
+ status = (*ctx->cred_cb)(&username, &password, request, baton,
+ code, authn_info->scheme->name,
+ authn_info->realm, cred_pool);
+ if (status) {
+ apr_pool_destroy(cred_pool);
+ return status;
+ }
+
+ digest_info->header = (code == 401) ? "Authorization" :
+ "Proxy-Authorization";
+
+ /* Store the digest authentication parameters in the context relative
+ to this connection, so we can use it to create the Authorization header
+ when setting up requests. */
+ digest_info->pool = conn->pool;
+ digest_info->qop = apr_pstrdup(digest_info->pool, qop);
+ digest_info->nonce = apr_pstrdup(digest_info->pool, nonce);
+ digest_info->cnonce = NULL;
+ digest_info->opaque = apr_pstrdup(digest_info->pool, opaque);
+ digest_info->algorithm = apr_pstrdup(digest_info->pool, algorithm);
+ digest_info->realm = apr_pstrdup(digest_info->pool, realm_name);
+ digest_info->username = apr_pstrdup(digest_info->pool, username);
+ digest_info->digest_nc++;
+
+ digest_info->ha1 = build_digest_ha1(username, password, digest_info->realm,
+ digest_info->pool);
+
+ apr_pool_destroy(cred_pool);
+
+ /* If the handshake is finished tell serf it can send as many requests as it
+ likes. */
+ serf_connection_set_max_outstanding_requests(conn, 0);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_digest(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_digest_connection(int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ /* Digest authentication is done per connection, so keep all progress
+ information per connection. */
+ if (code == 401) {
+ conn->authn_baton = apr_pcalloc(pool, sizeof(digest_authn_info_t));
+ } else {
+ conn->proxy_authn_baton = apr_pcalloc(pool, sizeof(digest_authn_info_t));
+ }
+
+ /* Make serf send the initial requests one by one */
+ serf_connection_set_max_outstanding_requests(conn, 1);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__setup_request_digest_auth(int code,
+ serf_connection_t *conn,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt)
+{
+ digest_authn_info_t *digest_info = (code == 401) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+ apr_status_t status = APR_SUCCESS;
+
+ if (digest_info && digest_info->realm) {
+ const char *value;
+ apr_uri_t parsed_uri;
+
+ /* extract path from uri */
+ status = apr_uri_parse(conn->pool, uri, &parsed_uri);
+
+ /* Build a new Authorization header. */
+ digest_info->header = (code == 401) ? "Authorization" :
+ "Proxy-Authorization";
+ value = build_auth_header(digest_info, parsed_uri.path, method,
+ conn->pool);
+
+ serf_bucket_headers_setn(hdrs_bkt, digest_info->header,
+ value);
+ digest_info->digest_nc++;
+ }
+
+ return status;
+}
+
+apr_status_t
+serf__validate_response_digest_auth(int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool)
+{
+ const char *key;
+ char *auth_attr;
+ char *nextkv;
+ const char *rspauth = NULL;
+ const char *qop = NULL;
+ const char *nc_str = NULL;
+ serf_bucket_t *hdrs;
+ digest_authn_info_t *digest_info = (code == 401) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+
+ hdrs = serf_bucket_response_get_headers(response);
+
+ /* Need a copy cuz we're going to write NUL characters into the string. */
+ if (code == 401)
+ auth_attr = apr_pstrdup(pool,
+ serf_bucket_headers_get(hdrs, "Authentication-Info"));
+ else
+ auth_attr = apr_pstrdup(pool,
+ serf_bucket_headers_get(hdrs, "Proxy-Authentication-Info"));
+
+ /* If there's no Authentication-Info header there's nothing to validate. */
+ if (! auth_attr)
+ return APR_SUCCESS;
+
+ /* We're expecting a list of key=value pairs, separated by a comma.
+ Ex. rspauth="8a4b8451084b082be6b105e2b7975087",
+ cnonce="346531653132652d303033392d3435", nc=00000007,
+ qop=auth */
+ for ( ; (key = apr_strtok(auth_attr, ",", &nextkv)) != NULL; auth_attr = NULL) {
+ char *val;
+
+ val = strchr(key, '=');
+ if (val == NULL)
+ continue;
+ *val++ = '\0';
+
+ /* skip leading spaces */
+ while (*key && *key == ' ')
+ key++;
+
+ /* If the value is quoted, then remove the quotes. */
+ if (*val == '"') {
+ apr_size_t last = strlen(val) - 1;
+
+ if (val[last] == '"') {
+ val[last] = '\0';
+ val++;
+ }
+ }
+
+ if (strcmp(key, "rspauth") == 0)
+ rspauth = val;
+ else if (strcmp(key, "qop") == 0)
+ qop = val;
+ else if (strcmp(key, "nc") == 0)
+ nc_str = val;
+ }
+
+ if (rspauth) {
+ const char *ha2, *tmp, *resp_hdr_hex;
+ unsigned char resp_hdr[APR_MD5_DIGESTSIZE];
+
+ ha2 = build_digest_ha2(conn->host_info.path, "", qop, pool);
+ tmp = apr_psprintf(pool, "%s:%s:%s:%s:%s:%s",
+ digest_info->ha1, digest_info->nonce, nc_str,
+ digest_info->cnonce, digest_info->qop, ha2);
+ apr_md5(resp_hdr, tmp, strlen(tmp));
+ resp_hdr_hex = hex_encode(resp_hdr, pool);
+
+ /* Incorrect response-digest in Authentication-Info header. */
+ if (strcmp(rspauth, resp_hdr_hex) != 0) {
+ return SERF_ERROR_AUTHN_FAILED;
+ }
+ }
+
+ return APR_SUCCESS;
+}
diff --git a/auth/auth_kerb.c b/auth/auth_kerb.c
new file mode 100644
index 0000000..f9be9a4
--- /dev/null
+++ b/auth/auth_kerb.c
@@ -0,0 +1,350 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "auth_kerb.h"
+
+#ifdef SERF_HAVE_KERB
+
+/*** Kerberos authentication ***/
+
+#include <serf.h>
+#include <serf_private.h>
+#include <auth/auth.h>
+
+#include <apr.h>
+#include <apr_base64.h>
+#include <apr_strings.h>
+
+/** These functions implement Kerberos authentication, using GSS-API
+ * (RFC 2743). The message exchange is documented in RFC 4559.
+ *
+ * Note: this implementation uses gssapi and only works on *nix.
+ **/
+
+/** TODO:
+ ** - send session key directly on new connections where we already know
+ ** the server requires Kerberos authn.
+ ** - fix authn status, as the COMPLETE/CONTINUE status values
+ ** are never used.
+ ** - Add a way for serf to give detailed error information back to the
+ ** application.
+ ** - proxy support
+ **/
+
+/* Authentication over HTTP using Kerberos
+ *
+ * Kerberos involves three servers:
+ * - Authentication Server (AS): verifies users during login
+ * - Ticket-Granting Server (TGS): issues proof of identity tickets
+ * - HTTP server (S)
+ *
+ * Steps:
+ * 0. User logs in to the AS and receives a TGS ticket. On workstations
+ * where the login program doesn't support Kerberos, the user can use
+ * 'kinit'.
+ *
+ * 1. C --> S: GET
+ *
+ * C <-- S: 401 Authentication Required
+ * WWW-Authenticate: Negotiate
+ *
+ * -> app contacts the TGS to request a session key for the HTTP service
+ * @ target host. The returned session key is encrypted with the HTTP
+ * service's secret key, so we can safely send it to the server.
+ *
+ * 2. C --> S: GET
+ * Authorization: Negotiate <Base64 encoded session key>
+ * gss_api_ctx->state = gss_api_auth_in_progress;
+ *
+ * C <-- S: 200 OK
+ * WWW-Authenticate: Negotiate <Base64 encoded server
+ * authentication data>
+ *
+ *    -> The server returned a key to prove itself to us. We check this key
+ * with the TGS again.
+ *
+ * Note: It's possible that the server returns 401 again in response to the
+ *       request in step 2, if the Kerberos context isn't complete yet. Some
+ *       (simple) tests with mod_auth_kerb and MIT Kerberos 5 suggest this
+ *       never happens in practice.
+ *
+ * This handshake is required for every new connection. If the handshake is
+ * completed successfully, all other requests on the same connection will
+ * be authenticated without needing to pass the WWW-Authenticate header.
+ *
+ * Note: Step 1 of the handshake will only happen on the first connection. Once
+ * we know the server requires Kerberos authentication, the initial requests
+ * on other connections will include a session key, so we start at step 2
+ * of the handshake.
+ * ### TODO: Not implemented yet!
+ */
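+
+/* Editor's sketch of the token transport described above (illustrative only;
+ * serf__encode_auth_header(), used in do_auth() below, presumably performs
+ * this step, and the variable names here are hypothetical). The session key
+ * bytes produced by GSS-API are Base64-encoded and placed after the
+ * "Negotiate " prefix, roughly:
+ *
+ *   int enc_len = apr_base64_encode_len(token_len);
+ *   char *enc = apr_palloc(pool, enc_len);
+ *   apr_base64_encode(enc, token, token_len);
+ *   header_value = apr_pstrcat(pool, "Negotiate ", enc, NULL);
+ *
+ * The server's reply token is recovered in reverse with
+ * apr_base64_decode_len()/apr_base64_decode(), as done in do_auth().
+ */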
+
+typedef enum {
+ gss_api_auth_not_started,
+ gss_api_auth_in_progress,
+ gss_api_auth_completed,
+} gss_api_auth_state;
+
+/* HTTP Service name, used to get the session key. */
+#define KRB_HTTP_SERVICE "HTTP"
+
+/* Stores the context information related to Kerberos authentication. */
+typedef struct
+{
+ apr_pool_t *pool;
+
+ /* GSSAPI context */
+ serf__kerb_context_t *gss_ctx;
+
+ /* Current state of the authentication cycle. */
+ gss_api_auth_state state;
+
+ const char *header;
+ const char *value;
+} gss_authn_info_t;
+
+/* On the initial 401 response of the server, request a session key from
+ the Kerberos KDC to pass to the server, proving that we are who we
+ claim to be. The session key can only be used with the HTTP service
+ on the target host. */
+static apr_status_t
+gss_api_get_credentials(char *token, apr_size_t token_len,
+ const char *hostname,
+ const char **buf, apr_size_t *buf_len,
+ gss_authn_info_t *gss_info)
+{
+ serf__kerb_buffer_t input_buf;
+ serf__kerb_buffer_t output_buf;
+ apr_status_t status = APR_SUCCESS;
+
+    /* If the server sent us a token, pass it to serf__kerb_init_sec_context
+       for validation. */
+ if (token) {
+ input_buf.value = token;
+ input_buf.length = token_len;
+ } else {
+ input_buf.value = 0;
+ input_buf.length = 0;
+ }
+
+ /* Establish a security context to the server. */
+ status = serf__kerb_init_sec_context
+ (gss_info->gss_ctx,
+ KRB_HTTP_SERVICE, hostname,
+ &input_buf,
+ &output_buf,
+ gss_info->pool,
+ gss_info->pool
+ );
+
+ switch(status) {
+ case APR_SUCCESS:
+ gss_info->state = gss_api_auth_completed;
+ break;
+ case APR_EAGAIN:
+ gss_info->state = gss_api_auth_in_progress;
+ status = APR_SUCCESS;
+ break;
+ default:
+ status = SERF_ERROR_AUTHN_FAILED;
+ break;
+ }
+
+ /* Return the session key to our caller. */
+ *buf = output_buf.value;
+ *buf_len = output_buf.length;
+
+ return status;
+}
+
+/* Read the header sent by the server (if any), invoke the gssapi authn
+ code and use the resulting Server Ticket on the next request to the
+ server. */
+static apr_status_t
+do_auth(int code,
+ gss_authn_info_t *gss_info,
+ serf_connection_t *conn,
+ const char *auth_hdr,
+ apr_pool_t *pool)
+{
+ serf_context_t *ctx = conn->ctx;
+ serf__authn_info_t *authn_info = (code == 401) ? &ctx->authn_info :
+ &ctx->proxy_authn_info;
+ const char *tmp = NULL;
+ char *token = NULL;
+ apr_size_t tmp_len = 0, token_len = 0;
+ const char *space = NULL;
+ apr_status_t status;
+
+ /* The server will return a token as attribute to the Negotiate key.
+ Negotiate YGwGCSqGSIb3EgECAgIAb10wW6ADAgEFoQMCAQ+iTzBNoAMCARCiRgREa6mouM
+ BAMFqKVdTGtfpZNXKzyw4Yo1paphJdIA3VOgncaoIlXxZLnkHiIHS2v65pVvrp
+ bRIyjF8xve9HxpnNIucCY9c=
+
+ Read this base64 value, decode it and validate it so we're sure the server
+ is who we expect it to be. */
+ if (auth_hdr)
+ space = strchr(auth_hdr, ' ');
+
+ if (space) {
+ token = apr_palloc(pool, apr_base64_decode_len(space + 1));
+ token_len = apr_base64_decode(token, space + 1);
+ }
+
+    /* We can get a whole batch of 401 responses from the server, but we should
+       only start the authentication phase once. So if authentication has
+       already started, ignore any response that carries only the initial
+       Negotiate header without a token.
+
+       Note: as we limit the connection to one outstanding request until the
+       authentication cycle is finished, this check shouldn't be needed. */
+ if (!token && gss_info->state != gss_api_auth_not_started)
+ return APR_SUCCESS;
+
+ status = gss_api_get_credentials(token, token_len, conn->host_info.hostname,
+ &tmp, &tmp_len,
+ gss_info);
+ if (status)
+ return status;
+
+ serf__encode_auth_header(&gss_info->value, authn_info->scheme->name,
+ tmp,
+ tmp_len,
+ pool);
+ gss_info->header = (code == 401) ? "Authorization" : "Proxy-Authorization";
+
+    /* If the handshake is finished, tell serf it can send as many requests
+       as it likes. */
+ if (gss_info->state == gss_api_auth_completed)
+ serf_connection_set_max_outstanding_requests(conn, 0);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__init_kerb(int code,
+ serf_context_t *ctx,
+ apr_pool_t *pool)
+{
+ return APR_SUCCESS;
+}
+
+/* A new connection is created to a server that's known to use
+ Kerberos. */
+apr_status_t
+serf__init_kerb_connection(int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ gss_authn_info_t *gss_info;
+ apr_status_t status;
+
+ gss_info = apr_pcalloc(pool, sizeof(*gss_info));
+ gss_info->pool = conn->pool;
+ gss_info->state = gss_api_auth_not_started;
+ status = serf__kerb_create_sec_context(&gss_info->gss_ctx, pool,
+ gss_info->pool);
+
+ if (status) {
+ return status;
+ }
+
+ if (code == 401) {
+ conn->authn_baton = gss_info;
+ } else {
+ conn->proxy_authn_baton = gss_info;
+ }
+
+ /* Make serf send the initial requests one by one */
+ serf_connection_set_max_outstanding_requests(conn, 1);
+
+ return APR_SUCCESS;
+}
+
+/* A 401 response was received, handle the authentication. */
+apr_status_t
+serf__handle_kerb_auth(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool)
+{
+ serf_connection_t *conn = request->conn;
+ gss_authn_info_t *gss_info = (code == 401) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+
+ return do_auth(code,
+ gss_info,
+ request->conn,
+ auth_hdr,
+ pool);
+}
+
+/* Setup the authn headers on this request message. */
+apr_status_t
+serf__setup_request_kerb_auth(int code,
+ serf_connection_t *conn,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt)
+{
+ gss_authn_info_t *gss_info = (code == 401) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+
+ if (gss_info && gss_info->header && gss_info->value) {
+ serf_bucket_headers_setn(hdrs_bkt, gss_info->header,
+ gss_info->value);
+
+ /* We should send each token only once. */
+ gss_info->header = NULL;
+ gss_info->value = NULL;
+ return APR_SUCCESS;
+ }
+
+ return SERF_ERROR_AUTHN_FAILED;
+}
+
+/* This function is called when 2xx responses are received. Normally we don't
+ * have to do anything, except for the first response after the
+ * authentication handshake. This specific response includes authentication
+ * data which should be validated by the client (mutual authentication).
+ */
+apr_status_t
+serf__validate_response_kerb_auth(int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool)
+{
+ gss_authn_info_t *gss_info = (code == 401) ? conn->authn_baton :
+ conn->proxy_authn_baton;
+ serf_bucket_t *hdrs;
+ const char *auth_hdr;
+
+ hdrs = serf_bucket_response_get_headers(response);
+ auth_hdr = serf_bucket_headers_get(hdrs, "WWW-Authenticate");
+
+ if (gss_info->state != gss_api_auth_completed) {
+ return do_auth(code,
+ gss_info,
+ conn,
+ auth_hdr,
+ pool);
+ }
+
+ return APR_SUCCESS;
+}
+
+#endif /* SERF_HAVE_KERB */
diff --git a/auth/auth_kerb.h b/auth/auth_kerb.h
new file mode 100644
index 0000000..a0689c1
--- /dev/null
+++ b/auth/auth_kerb.h
@@ -0,0 +1,103 @@
+/* Copyright 2010 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUTH_KERB_H
+#define AUTH_KERB_H
+
+#include <apr.h>
+#include <apr_pools.h>
+
+#if defined(SERF_HAVE_SSPI)
+#define SERF_HAVE_KERB
+#define SERF_USE_SSPI
+#elif defined(SERF_HAVE_GSSAPI)
+#define SERF_HAVE_KERB
+#define SERF_USE_GSSAPI
+#endif
+
+#ifdef SERF_HAVE_KERB
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct serf__kerb_context_t serf__kerb_context_t;
+
+typedef struct serf__kerb_buffer_t {
+ apr_size_t length;
+ void *value;
+} serf__kerb_buffer_t;
+
+/* Create outbound security context.
+ *
+ * All temporary allocations will be performed in SCRATCH_POOL, while the
+ * security context will be allocated in RESULT_POOL and destroyed
+ * automatically on RESULT_POOL cleanup.
+ */
+apr_status_t
+serf__kerb_create_sec_context(serf__kerb_context_t **ctx_p,
+ apr_pool_t *scratch_pool,
+ apr_pool_t *result_pool);
+
+/* Initialize outbound security context.
+ *
+ * The function is used to build a security context between the client
+ * application and a remote peer.
+ *
+ * CTX is pointer to existing context created using
+ * serf__kerb_create_sec_context() function.
+ *
+ * SERVICE is the name of the Kerberos service, usually 'HTTP'. HOSTNAME is
+ * the canonical name of the destination server; the caller should resolve
+ * the server's alias to its canonical name.
+ *
+ * INPUT_BUF is a pointer to a structure describing the input token, if any.
+ * It should be zero length on the first call.
+ *
+ * OUTPUT_BUF will be populated with a pointer to the output data that should
+ * be sent to the destination server. This buffer will be freed automatically
+ * on RESULT_POOL cleanup.
+ *
+ * All temporary allocations will be performed in SCRATCH_POOL.
+ *
+ * Return value:
+ * - APR_EAGAIN The client must send the output token to the server and wait
+ * for a return token.
+ *
+ * - APR_SUCCESS The security context was successfully initialized. There is no
+ * need for another serf__kerb_init_sec_context call. If the function returns
+ * an output token, that is, if the OUTPUT_BUF is of nonzero length, that
+ * token must be sent to the server.
+ *
+ * Other return values indicate an error.
+ */
+apr_status_t
+serf__kerb_init_sec_context(serf__kerb_context_t *ctx,
+ const char *service,
+ const char *hostname,
+ serf__kerb_buffer_t *input_buf,
+ serf__kerb_buffer_t *output_buf,
+ apr_pool_t *scratch_pool,
+ apr_pool_t *result_pool
+ );
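+
+/* Editor's usage sketch (illustrative only, not part of the original header;
+ * error handling is trimmed and names such as 'conn_pool' are hypothetical):
+ *
+ *   serf__kerb_context_t *ctx;
+ *   serf__kerb_buffer_t in = { 0, NULL }, out;
+ *   apr_status_t status;
+ *
+ *   status = serf__kerb_create_sec_context(&ctx, scratch_pool, conn_pool);
+ *
+ *   do {
+ *       status = serf__kerb_init_sec_context(ctx, "HTTP", "www.example.com",
+ *                                            &in, &out,
+ *                                            scratch_pool, conn_pool);
+ *       (send out.value, out.length bytes, to the server; place the server's
+ *        reply token in 'in' before the next iteration)
+ *   } while (status == APR_EAGAIN);
+ */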
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SERF_HAVE_KERB */
+
+#endif /* !AUTH_KERB_H */
diff --git a/auth/auth_kerb_gss.c b/auth/auth_kerb_gss.c
new file mode 100644
index 0000000..b7267d6
--- /dev/null
+++ b/auth/auth_kerb_gss.c
@@ -0,0 +1,146 @@
+/* Copyright 2009 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "auth_kerb.h"
+
+#ifdef SERF_USE_GSSAPI
+#include <apr_strings.h>
+#include <gssapi/gssapi.h>
+
+struct serf__kerb_context_t
+{
+ /* GSSAPI context */
+ gss_ctx_id_t gss_ctx;
+
+ /* Mechanism used to authenticate, should be Kerberos. */
+ gss_OID gss_mech;
+};
+
+/* Cleans the GSS context object, when the pool used to create it gets
+ cleared or destroyed. */
+static apr_status_t
+cleanup_ctx(void *data)
+{
+ OM_uint32 min_stat;
+ serf__kerb_context_t *ctx = data;
+
+ if (ctx->gss_ctx != GSS_C_NO_CONTEXT) {
+ if (gss_delete_sec_context(&min_stat, &ctx->gss_ctx,
+ GSS_C_NO_BUFFER) == GSS_S_FAILURE)
+ return APR_EGENERAL;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t
+cleanup_sec_buffer(void *data)
+{
+ OM_uint32 min_stat;
+ gss_buffer_desc *gss_buf = data;
+
+ gss_release_buffer(&min_stat, gss_buf);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__kerb_create_sec_context(serf__kerb_context_t **ctx_p,
+ apr_pool_t *scratch_pool,
+ apr_pool_t *result_pool)
+{
+ serf__kerb_context_t *ctx;
+
+ ctx = apr_pcalloc(result_pool, sizeof(*ctx));
+
+ ctx->gss_ctx = GSS_C_NO_CONTEXT;
+ ctx->gss_mech = GSS_C_NO_OID;
+
+ apr_pool_cleanup_register(result_pool, ctx,
+ cleanup_ctx,
+ apr_pool_cleanup_null);
+
+ *ctx_p = ctx;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__kerb_init_sec_context(serf__kerb_context_t *ctx,
+ const char *service,
+ const char *hostname,
+ serf__kerb_buffer_t *input_buf,
+ serf__kerb_buffer_t *output_buf,
+ apr_pool_t *scratch_pool,
+ apr_pool_t *result_pool
+ )
+{
+ gss_buffer_desc gss_input_buf = GSS_C_EMPTY_BUFFER;
+ gss_buffer_desc *gss_output_buf_p;
+ OM_uint32 gss_min_stat, gss_maj_stat;
+ gss_name_t host_gss_name;
+ gss_buffer_desc bufdesc;
+
+ /* Get the name for the HTTP service at the target host. */
+ bufdesc.value = apr_pstrcat(scratch_pool, service, "@", hostname, NULL);
+ bufdesc.length = strlen(bufdesc.value);
+ gss_maj_stat = gss_import_name (&gss_min_stat, &bufdesc, GSS_C_NT_HOSTBASED_SERVICE,
+ &host_gss_name);
+ if(GSS_ERROR(gss_maj_stat)) {
+ return APR_EGENERAL;
+ }
+
+    /* If the server sent us a token, pass it to gss_init_sec_context for
+       validation. */
+ gss_input_buf.value = input_buf->value;
+ gss_input_buf.length = input_buf->length;
+
+ gss_output_buf_p = apr_pcalloc(result_pool, sizeof(*gss_output_buf_p));
+
+ /* Establish a security context to the server. */
+ gss_maj_stat = gss_init_sec_context
+ (&gss_min_stat, /* minor_status */
+ GSS_C_NO_CREDENTIAL, /* XXXXX claimant_cred_handle */
+ &ctx->gss_ctx, /* gssapi context handle */
+ host_gss_name, /* HTTP@server name */
+         ctx->gss_mech,            /* mech_type (0 initially) */
+ GSS_C_MUTUAL_FLAG, /* ensure the peer authenticates itself */
+ 0, /* default validity period */
+ GSS_C_NO_CHANNEL_BINDINGS, /* do not use channel bindings */
+ &gss_input_buf, /* server token, initially empty */
+ &ctx->gss_mech, /* actual mech type */
+ gss_output_buf_p, /* output_token */
+ NULL, /* ret_flags */
+ NULL /* not interested in remaining validity */
+ );
+
+ apr_pool_cleanup_register(result_pool, gss_output_buf_p,
+ cleanup_sec_buffer,
+ apr_pool_cleanup_null);
+
+ output_buf->value = gss_output_buf_p->value;
+ output_buf->length = gss_output_buf_p->length;
+
+ switch(gss_maj_stat) {
+ case GSS_S_COMPLETE:
+ return APR_SUCCESS;
+ case GSS_S_CONTINUE_NEEDED:
+ return APR_EAGAIN;
+ default:
+ return APR_EGENERAL;
+ }
+}
+
+#endif /* SERF_USE_GSSAPI */
diff --git a/auth/auth_kerb_sspi.c b/auth/auth_kerb_sspi.c
new file mode 100644
index 0000000..db28ab1
--- /dev/null
+++ b/auth/auth_kerb_sspi.c
@@ -0,0 +1,210 @@
+/* Copyright 2010 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "auth_kerb.h"
+
+#ifdef SERF_USE_SSPI
+#include <apr.h>
+#include <apr_strings.h>
+
+#define SECURITY_WIN32
+#include <sspi.h>
+
+struct serf__kerb_context_t
+{
+ CredHandle sspi_credentials;
+ CtxtHandle sspi_context;
+ BOOL initalized;
+};
+
+/* Cleans the SSPI context object, when the pool used to create it gets
+ cleared or destroyed. */
+static apr_status_t
+cleanup_ctx(void *data)
+{
+ serf__kerb_context_t *ctx = data;
+
+ if (SecIsValidHandle(&ctx->sspi_context)) {
+ DeleteSecurityContext(&ctx->sspi_context);
+ SecInvalidateHandle(&ctx->sspi_context);
+ }
+
+ if (SecIsValidHandle(&ctx->sspi_credentials)) {
+        FreeCredentialsHandle(&ctx->sspi_credentials);
+        SecInvalidateHandle(&ctx->sspi_credentials);
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t
+cleanup_sec_buffer(void *data)
+{
+ FreeContextBuffer(data);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__kerb_create_sec_context(serf__kerb_context_t **ctx_p,
+ apr_pool_t *scratch_pool,
+ apr_pool_t *result_pool)
+{
+ SECURITY_STATUS sspi_status;
+ serf__kerb_context_t *ctx;
+
+ ctx = apr_pcalloc(result_pool, sizeof(*ctx));
+
+ SecInvalidateHandle(&ctx->sspi_context);
+ SecInvalidateHandle(&ctx->sspi_credentials);
+ ctx->initalized = FALSE;
+
+ apr_pool_cleanup_register(result_pool, ctx,
+ cleanup_ctx,
+ apr_pool_cleanup_null);
+
+ sspi_status = AcquireCredentialsHandle(
+ NULL, "Negotiate", SECPKG_CRED_OUTBOUND,
+ NULL, NULL, NULL, NULL,
+ &ctx->sspi_credentials, NULL);
+
+ if (FAILED(sspi_status)) {
+ return APR_EGENERAL;
+ }
+
+ *ctx_p = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t
+get_canonical_hostname(const char **canonname,
+ const char *hostname,
+ apr_pool_t *pool)
+{
+ struct addrinfo hints;
+ struct addrinfo *addrinfo;
+
+ ZeroMemory(&hints, sizeof(hints));
+ hints.ai_flags = AI_CANONNAME;
+
+ if (getaddrinfo(hostname, NULL, &hints, &addrinfo)) {
+ return apr_get_netos_error();
+ }
+
+ if (addrinfo) {
+ *canonname = apr_pstrdup(pool, addrinfo->ai_canonname);
+ }
+ else {
+ *canonname = apr_pstrdup(pool, hostname);
+ }
+
+ freeaddrinfo(addrinfo);
+ return APR_SUCCESS;
+}
+
+apr_status_t
+serf__kerb_init_sec_context(serf__kerb_context_t *ctx,
+ const char *service,
+ const char *hostname,
+ serf__kerb_buffer_t *input_buf,
+ serf__kerb_buffer_t *output_buf,
+ apr_pool_t *scratch_pool,
+ apr_pool_t *result_pool
+ )
+{
+ SECURITY_STATUS status;
+ ULONG actual_attr;
+ SecBuffer sspi_in_buffer;
+ SecBufferDesc sspi_in_buffer_desc;
+ SecBuffer sspi_out_buffer;
+ SecBufferDesc sspi_out_buffer_desc;
+ char *target_name;
+ apr_status_t apr_status;
+ const char *canonname;
+
+ apr_status = get_canonical_hostname(&canonname, hostname, scratch_pool);
+ if (apr_status) {
+ return apr_status;
+ }
+ target_name = apr_pstrcat(scratch_pool, service, "/", canonname, NULL);
+
+ /* Prepare input buffer description. */
+ sspi_in_buffer.BufferType = SECBUFFER_TOKEN;
+ sspi_in_buffer.pvBuffer = input_buf->value;
+ sspi_in_buffer.cbBuffer = input_buf->length;
+
+ sspi_in_buffer_desc.cBuffers = 1;
+ sspi_in_buffer_desc.pBuffers = &sspi_in_buffer;
+ sspi_in_buffer_desc.ulVersion = SECBUFFER_VERSION;
+
+ /* Output buffers. Output buffer will be allocated by system. */
+ sspi_out_buffer.BufferType = SECBUFFER_TOKEN;
+ sspi_out_buffer.pvBuffer = NULL;
+ sspi_out_buffer.cbBuffer = 0;
+
+ sspi_out_buffer_desc.cBuffers = 1;
+ sspi_out_buffer_desc.pBuffers = &sspi_out_buffer;
+ sspi_out_buffer_desc.ulVersion = SECBUFFER_VERSION;
+
+ status = InitializeSecurityContext(
+ &ctx->sspi_credentials,
+ ctx->initalized ? &ctx->sspi_context : NULL,
+ target_name,
+ ISC_REQ_ALLOCATE_MEMORY
+ | ISC_REQ_MUTUAL_AUTH
+ | ISC_REQ_CONFIDENTIALITY,
+ 0, /* Reserved1 */
+ SECURITY_NETWORK_DREP,
+ &sspi_in_buffer_desc,
+ 0, /* Reserved2 */
+ &ctx->sspi_context,
+ &sspi_out_buffer_desc,
+ &actual_attr,
+ NULL);
+
+ if (sspi_out_buffer.cbBuffer > 0) {
+ apr_pool_cleanup_register(result_pool, sspi_out_buffer.pvBuffer,
+ cleanup_sec_buffer,
+ apr_pool_cleanup_null);
+ }
+
+ ctx->initalized = TRUE;
+
+ /* Finish authentication if SSPI requires so. */
+ if (status == SEC_I_COMPLETE_NEEDED
+ || status == SEC_I_COMPLETE_AND_CONTINUE)
+ {
+ CompleteAuthToken(&ctx->sspi_context, &sspi_out_buffer_desc);
+ }
+
+ output_buf->value = sspi_out_buffer.pvBuffer;
+ output_buf->length = sspi_out_buffer.cbBuffer;
+
+ switch(status) {
+ case SEC_I_COMPLETE_AND_CONTINUE:
+ case SEC_I_CONTINUE_NEEDED:
+ return APR_EAGAIN;
+
+ case SEC_I_COMPLETE_NEEDED:
+ case SEC_E_OK:
+ return APR_SUCCESS;
+
+ default:
+ return APR_EGENERAL;
+ }
+}
+
+#endif /* SERF_USE_SSPI */
\ No newline at end of file
diff --git a/buckets/aggregate_buckets.c b/buckets/aggregate_buckets.c
new file mode 100644
index 0000000..d9d15a3
--- /dev/null
+++ b/buckets/aggregate_buckets.c
@@ -0,0 +1,400 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+/* Should be an APR_RING? */
+typedef struct bucket_list {
+ serf_bucket_t *bucket;
+ struct bucket_list *next;
+} bucket_list_t;
+
+typedef struct {
+ bucket_list_t *list; /* active buckets */
+ bucket_list_t *last; /* last bucket of the list */
+ bucket_list_t *done; /* we finished reading this; now pending a destroy */
+
+ serf_bucket_aggregate_eof_t hold_open;
+ void *hold_open_baton;
+
+ /* Does this bucket own its children? !0 if yes, 0 if not. */
+ int bucket_owner;
+} aggregate_context_t;
+
+
+static void cleanup_aggregate(aggregate_context_t *ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ bucket_list_t *next_list;
+
+ /* If we finished reading a bucket during the previous read, then
+ * we can now toss that bucket.
+ */
+ while (ctx->done != NULL) {
+ next_list = ctx->done->next;
+
+ if (ctx->bucket_owner) {
+ serf_bucket_destroy(ctx->done->bucket);
+ }
+ serf_bucket_mem_free(allocator, ctx->done);
+
+ ctx->done = next_list;
+ }
+}
+
+void serf_bucket_aggregate_cleanup(
+ serf_bucket_t *bucket, serf_bucket_alloc_t *allocator)
+{
+ aggregate_context_t *ctx = bucket->data;
+
+ cleanup_aggregate(ctx, allocator);
+}
+
+static aggregate_context_t *create_aggregate(serf_bucket_alloc_t *allocator)
+{
+ aggregate_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+
+ ctx->list = NULL;
+ ctx->last = NULL;
+ ctx->done = NULL;
+ ctx->hold_open = NULL;
+ ctx->hold_open_baton = NULL;
+ ctx->bucket_owner = 1;
+
+ return ctx;
+}
+
+serf_bucket_t *serf_bucket_aggregate_create(
+ serf_bucket_alloc_t *allocator)
+{
+ aggregate_context_t *ctx;
+
+ ctx = create_aggregate(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_aggregate, allocator, ctx);
+}
+
+serf_bucket_t *serf__bucket_stream_create(
+ serf_bucket_alloc_t *allocator,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton)
+{
+ serf_bucket_t *bucket = serf_bucket_aggregate_create(allocator);
+ aggregate_context_t *ctx = bucket->data;
+
+ serf_bucket_aggregate_hold_open(bucket, fn, baton);
+
+ ctx->bucket_owner = 0;
+
+ return bucket;
+}
+
+
+static void serf_aggregate_destroy_and_data(serf_bucket_t *bucket)
+{
+ aggregate_context_t *ctx = bucket->data;
+ bucket_list_t *next_ctx;
+
+ while (ctx->list) {
+ if (ctx->bucket_owner) {
+ serf_bucket_destroy(ctx->list->bucket);
+ }
+ next_ctx = ctx->list->next;
+ serf_bucket_mem_free(bucket->allocator, ctx->list);
+ ctx->list = next_ctx;
+ }
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+void serf_bucket_aggregate_become(serf_bucket_t *bucket)
+{
+ aggregate_context_t *ctx;
+
+ ctx = create_aggregate(bucket->allocator);
+
+ bucket->type = &serf_bucket_type_aggregate;
+ bucket->data = ctx;
+
+ /* The allocator remains the same. */
+}
+
+
+void serf_bucket_aggregate_prepend(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *prepend_bucket)
+{
+ aggregate_context_t *ctx = aggregate_bucket->data;
+ bucket_list_t *new_list;
+
+ new_list = serf_bucket_mem_alloc(aggregate_bucket->allocator,
+ sizeof(*new_list));
+ new_list->bucket = prepend_bucket;
+ new_list->next = ctx->list;
+
+ ctx->list = new_list;
+}
+
+void serf_bucket_aggregate_append(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *append_bucket)
+{
+ aggregate_context_t *ctx = aggregate_bucket->data;
+ bucket_list_t *new_list;
+
+ new_list = serf_bucket_mem_alloc(aggregate_bucket->allocator,
+ sizeof(*new_list));
+ new_list->bucket = append_bucket;
+ new_list->next = NULL;
+
+ /* If we use APR_RING, this is trivial. So, wait.
+ new_list->next = ctx->list;
+ ctx->list = new_list;
+ */
+ if (ctx->list == NULL) {
+ ctx->list = new_list;
+ ctx->last = new_list;
+ }
+ else {
+ ctx->last->next = new_list;
+ ctx->last = ctx->last->next;
+ }
+}
+
+void serf_bucket_aggregate_hold_open(serf_bucket_t *aggregate_bucket,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton)
+{
+ aggregate_context_t *ctx = aggregate_bucket->data;
+ ctx->hold_open = fn;
+ ctx->hold_open_baton = baton;
+}
+
+void serf_bucket_aggregate_prepend_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count)
+{
+ int i;
+
+ /* Add in reverse order. */
+ for (i = vecs_count - 1; i >= 0; i--) {
+ serf_bucket_t *new_bucket;
+
+ new_bucket = serf_bucket_simple_create(vecs[i].iov_base,
+ vecs[i].iov_len,
+ NULL, NULL,
+ aggregate_bucket->allocator);
+
+ serf_bucket_aggregate_prepend(aggregate_bucket, new_bucket);
+
+ }
+}
+
+void serf_bucket_aggregate_append_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count)
+{
+ serf_bucket_t *new_bucket;
+
+ new_bucket = serf_bucket_iovec_create(vecs, vecs_count,
+ aggregate_bucket->allocator);
+
+ serf_bucket_aggregate_append(aggregate_bucket, new_bucket);
+}
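+
+/* Editor's usage sketch (illustrative only, not part of the original source).
+ * Two static strings are wrapped in simple buckets, chained into an
+ * aggregate, and read back as one stream:
+ *
+ *   const char *data;
+ *   apr_size_t len;
+ *   apr_status_t status;
+ *
+ *   serf_bucket_t *agg = serf_bucket_aggregate_create(allocator);
+ *   serf_bucket_t *b1 = serf_bucket_simple_create("Hello, ", 7,
+ *                                                 NULL, NULL, allocator);
+ *   serf_bucket_t *b2 = serf_bucket_simple_create("world!", 6,
+ *                                                 NULL, NULL, allocator);
+ *
+ *   serf_bucket_aggregate_append(agg, b1);
+ *   serf_bucket_aggregate_append(agg, b2);
+ *
+ *   do {
+ *       status = serf_bucket_read(agg, SERF_READ_ALL_AVAIL, &data, &len);
+ *       (consume data/len)
+ *   } while (!APR_STATUS_IS_EOF(status) && !SERF_BUCKET_READ_ERROR(status));
+ *
+ *   serf_bucket_destroy(agg);
+ *
+ * Destroying the aggregate also destroys b1/b2, since the aggregate owns its
+ * children by default (bucket_owner is set in create_aggregate()).
+ */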
+
+static apr_status_t read_aggregate(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size, struct iovec *vecs,
+ int *vecs_used)
+{
+ aggregate_context_t *ctx = bucket->data;
+ int cur_vecs_used;
+ apr_status_t status;
+
+ *vecs_used = 0;
+
+ if (!ctx->list) {
+ if (ctx->hold_open) {
+ return ctx->hold_open(ctx->hold_open_baton, bucket);
+ }
+ else {
+ return APR_EOF;
+ }
+ }
+
+ status = APR_SUCCESS;
+ while (requested) {
+ serf_bucket_t *head = ctx->list->bucket;
+
+ status = serf_bucket_read_iovec(head, requested, vecs_size, vecs,
+ &cur_vecs_used);
+
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* Add the number of vecs we read to our running total. */
+ *vecs_used += cur_vecs_used;
+
+ if (cur_vecs_used > 0 || status) {
+ bucket_list_t *next_list;
+
+ /* If we got SUCCESS (w/bytes) or EAGAIN, we want to return now
+ * as it isn't safe to read more without returning to our caller.
+ */
+ if (!status || APR_STATUS_IS_EAGAIN(status) || status == SERF_ERROR_WAIT_CONN) {
+ return status;
+ }
+
+ /* However, if we read EOF, we can stash this bucket in a
+ * to-be-freed list and move on to the next bucket. This ensures
+ * that the bucket stays alive (so as not to violate our read
+ * semantics). We'll destroy this list of buckets the next time
+ * we are asked to perform a read operation - thus ensuring the
+ * proper read lifetime.
+ */
+ next_list = ctx->list->next;
+ ctx->list->next = ctx->done;
+ ctx->done = ctx->list;
+ ctx->list = next_list;
+
+ /* If we have no more in our list, return EOF. */
+ if (!ctx->list) {
+ if (ctx->hold_open) {
+ return ctx->hold_open(ctx->hold_open_baton, bucket);
+ }
+ else {
+ return APR_EOF;
+ }
+ }
+
+            /* At this point, it is safe to read the next bucket - if we can. */
+
+ /* If the caller doesn't want ALL_AVAIL, decrement the size
+ * of the items we just read from the list.
+ */
+ if (requested != SERF_READ_ALL_AVAIL) {
+ int i;
+
+ for (i = 0; i < cur_vecs_used; i++)
+ requested -= vecs[i].iov_len;
+ }
+
+ /* Adjust our vecs to account for what we just read. */
+ vecs_size -= cur_vecs_used;
+ vecs += cur_vecs_used;
+
+ /* We reached our max. Oh well. */
+ if (!requested || !vecs_size) {
+ return APR_SUCCESS;
+ }
+ }
+ }
+
+ return status;
+}
+
+static apr_status_t serf_aggregate_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ aggregate_context_t *ctx = bucket->data;
+ struct iovec vec;
+ int vecs_used;
+ apr_status_t status;
+
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ status = read_aggregate(bucket, requested, 1, &vec, &vecs_used);
+
+ if (!vecs_used) {
+ *len = 0;
+ }
+ else {
+ *data = vec.iov_base;
+ *len = vec.iov_len;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_aggregate_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ aggregate_context_t *ctx = bucket->data;
+
+ cleanup_aggregate(ctx, bucket->allocator);
+
+ return read_aggregate(bucket, requested, vecs_size, vecs, vecs_used);
+}
+
+static apr_status_t serf_aggregate_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ /* Follow pattern from serf_aggregate_read. */
+ return APR_ENOTIMPL;
+}
+
+static apr_status_t serf_aggregate_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ /* Follow pattern from serf_aggregate_read. */
+ return APR_ENOTIMPL;
+}
+
+static serf_bucket_t * serf_aggregate_read_bucket(
+ serf_bucket_t *bucket,
+ const serf_bucket_type_t *type)
+{
+ aggregate_context_t *ctx = bucket->data;
+ serf_bucket_t *found_bucket;
+
+ if (!ctx->list) {
+ return NULL;
+ }
+
+ if (ctx->list->bucket->type == type) {
+ /* Got the bucket. Consume it from our list. */
+ found_bucket = ctx->list->bucket;
+ ctx->list = ctx->list->next;
+ return found_bucket;
+ }
+
+ /* Call read_bucket on first one in our list. */
+ return serf_bucket_read_bucket(ctx->list->bucket, type);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_aggregate = {
+ "AGGREGATE",
+ serf_aggregate_read,
+ serf_aggregate_readline,
+ serf_aggregate_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_aggregate_read_bucket,
+ serf_aggregate_peek,
+ serf_aggregate_destroy_and_data,
+};
diff --git a/buckets/allocator.c b/buckets/allocator.c
new file mode 100644
index 0000000..857cc74
--- /dev/null
+++ b/buckets/allocator.c
@@ -0,0 +1,434 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct node_header_t {
+ apr_size_t size;
+ union {
+ struct node_header_t *next; /* if size == 0 (freed/inactive) */
+ /* no data if size == STANDARD_NODE_SIZE */
+ apr_memnode_t *memnode; /* if size > STANDARD_NODE_SIZE */
+ } u;
+} node_header_t;
+
+/* The size of a node_header_t, properly aligned. Note that (normally)
+ * this macro will round the size to a multiple of 8 bytes. Keep this in
+ * mind when altering the node_header_t structure. Also, keep in mind that
+ * node_header_t is an overhead for every allocation performed through
+ * the serf_bucket_mem_alloc() function.
+ */
+#define SIZEOF_NODE_HEADER_T APR_ALIGN_DEFAULT(sizeof(node_header_t))
+
+
+/* STANDARD_NODE_SIZE is manually set to an allocation size that will
+ * capture most allocations performed via this API. It must be "large
+ * enough" to avoid lots of spillage into direct allocations from the
+ * apr_allocator associated with the bucket allocator. The apr_allocator
+ * has a minimum size of 8k, which can be expensive if you missed the
+ * STANDARD_NODE_SIZE by just a few bytes.
+ */
+/* ### we should define some rules or ways to determine how to derive
+ * ### a "good" value for this. probably log some stats on allocs, then
+ * ### analyze them for size "misses". then find the balance point between
+ * ### wasted space due to min-size allocator, and wasted-space due to
+ * ### size-spill to the 8k minimum.
+ */
+#define STANDARD_NODE_SIZE 128
+
+/* When allocating a block of memory from the allocator, we should go for
+ * an 8k block, minus the overhead that the allocator needs.
+ */
+#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)
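+
+/* Editor's worked example of the sizing rules above (illustrative; the exact
+ * header size is platform dependent -- here we assume SIZEOF_NODE_HEADER_T
+ * is 16 on a typical 64-bit build):
+ *
+ *   serf_bucket_mem_alloc(alloc, 100)  ->  100 + 16 = 116 <= 128, so it is
+ *       served from the freelist or carved out of an 8k block in
+ *       STANDARD_NODE_SIZE (128 byte) chunks.
+ *
+ *   serf_bucket_mem_alloc(alloc, 200)  ->  200 + 16 = 216 > 128, so it gets
+ *       its own apr_memnode_t straight from the apr_allocator, which rounds
+ *       the request up to its 8k minimum.
+ */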
+
+/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
+ * calls to serf_bucket_mem_free().
+ */
+#define DEBUG_DOUBLE_FREE
+
+
+typedef struct {
+ const serf_bucket_t *bucket;
+ apr_status_t last;
+} read_status_t;
+
+#define TRACK_BUCKET_COUNT 100 /* track N buckets' status */
+
+typedef struct {
+ int next_index; /* info[] is a ring. next bucket goes at this idx. */
+ int num_used;
+
+ read_status_t info[TRACK_BUCKET_COUNT];
+} track_state_t;
+
+
+struct serf_bucket_alloc_t {
+ apr_pool_t *pool;
+ apr_allocator_t *allocator;
+ int own_allocator;
+
+ serf_unfreed_func_t unfreed;
+ void *unfreed_baton;
+
+ apr_uint32_t num_alloc;
+
+ node_header_t *freelist; /* free STANDARD_NODE_SIZE blocks */
+ apr_memnode_t *blocks; /* blocks we allocated for subdividing */
+
+ track_state_t *track;
+};
+
+/* ==================================================================== */
+
+
+static apr_status_t allocator_cleanup(void *data)
+{
+ serf_bucket_alloc_t *allocator = data;
+
+ /* If we allocated anything, give it back. */
+ if (allocator->blocks) {
+ apr_allocator_free(allocator->allocator, allocator->blocks);
+ }
+
+ /* If we allocated our own allocator (?!), destroy it here. */
+ if (allocator->own_allocator) {
+ apr_allocator_destroy(allocator->allocator);
+ }
+
+ return APR_SUCCESS;
+}
+
+serf_bucket_alloc_t *serf_bucket_allocator_create(
+ apr_pool_t *pool,
+ serf_unfreed_func_t unfreed,
+ void *unfreed_baton)
+{
+ serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));
+
+ allocator->pool = pool;
+ allocator->allocator = apr_pool_allocator_get(pool);
+ if (allocator->allocator == NULL) {
+        /* This most likely means pools are running in debug mode; create our
+         * own allocator to manage memory ourselves. */
+ apr_allocator_create(&allocator->allocator);
+ allocator->own_allocator = 1;
+ }
+ allocator->unfreed = unfreed;
+ allocator->unfreed_baton = unfreed_baton;
+
+#ifdef SERF_DEBUG_BUCKET_USE
+ {
+ track_state_t *track;
+
+ track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
+ track->next_index = 0;
+ track->num_used = 0;
+ }
+#endif
+
+ /* ### this implies buckets cannot cross a fork/exec. desirable?
+ *
+ * ### hmm. it probably also means that buckets cannot be AROUND
+ * ### during a fork/exec. the new process will try to clean them
+ * ### up and figure out there are unfreed blocks...
+ */
+ apr_pool_cleanup_register(pool, allocator,
+ allocator_cleanup, allocator_cleanup);
+
+ return allocator;
+}
+
+apr_pool_t *serf_bucket_allocator_get_pool(
+ const serf_bucket_alloc_t *allocator)
+{
+ return allocator->pool;
+}
+
+
+void *serf_bucket_mem_alloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size)
+{
+ node_header_t *node;
+
+ ++allocator->num_alloc;
+
+ size += SIZEOF_NODE_HEADER_T;
+ if (size <= STANDARD_NODE_SIZE) {
+ if (allocator->freelist) {
+ /* just pull a node off our freelist */
+ node = allocator->freelist;
+ allocator->freelist = node->u.next;
+#ifdef DEBUG_DOUBLE_FREE
+ /* When we free an item, we set its size to zero. Thus, when
+ * we return it to the caller, we must ensure the size is set
+ * properly.
+ */
+ node->size = STANDARD_NODE_SIZE;
+#endif
+ }
+ else {
+ apr_memnode_t *active = allocator->blocks;
+
+ if (active == NULL
+ || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
+ apr_memnode_t *head = allocator->blocks;
+
+ /* ran out of room. grab another block. */
+ active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);
+
+ /* System couldn't provide us with memory. */
+ if (active == NULL)
+ return NULL;
+
+ /* link the block into our tracking list */
+ allocator->blocks = active;
+ active->next = head;
+ }
+
+ node = (node_header_t *)active->first_avail;
+ node->size = STANDARD_NODE_SIZE;
+ active->first_avail += STANDARD_NODE_SIZE;
+ }
+ }
+ else {
+ apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
+ size);
+
+ if (memnode == NULL)
+ return NULL;
+
+ node = (node_header_t *)memnode->first_avail;
+ node->u.memnode = memnode;
+ node->size = size;
+ }
+
+ return ((char *)node) + SIZEOF_NODE_HEADER_T;
+}
+
+
+void *serf_bucket_mem_calloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size)
+{
+ void *mem;
+ mem = serf_bucket_mem_alloc(allocator, size);
+ if (mem == NULL)
+ return NULL;
+ memset(mem, 0, size);
+ return mem;
+}
+
+
+void serf_bucket_mem_free(
+ serf_bucket_alloc_t *allocator,
+ void *block)
+{
+ node_header_t *node;
+
+ --allocator->num_alloc;
+
+ node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);
+
+ if (node->size == STANDARD_NODE_SIZE) {
+ /* put the node onto our free list */
+ node->u.next = allocator->freelist;
+ allocator->freelist = node;
+
+#ifdef DEBUG_DOUBLE_FREE
+ /* note that this thing was freed. */
+ node->size = 0;
+ }
+ else if (node->size == 0) {
+ /* damn thing was freed already. */
+ abort();
+#endif
+ }
+ else {
+#ifdef DEBUG_DOUBLE_FREE
+ /* note that this thing was freed. */
+ node->size = 0;
+#endif
+
+ /* now free it */
+ apr_allocator_free(allocator->allocator, node->u.memnode);
+ }
+}
+
+
+/* ==================================================================== */
+
+
+#ifdef SERF_DEBUG_BUCKET_USE
+
+static read_status_t *find_read_status(
+ track_state_t *track,
+ const serf_bucket_t *bucket,
+ int create_rs)
+{
+ read_status_t *rs;
+
+ if (track->num_used) {
+ int count = track->num_used;
+ int idx = track->next_index;
+
+ /* Search backwards. In all likelihood, the bucket which just got
+ * read was read very recently.
+ */
+ while (count-- > 0) {
+ if (!idx--) {
+ /* assert: track->num_used == TRACK_BUCKET_COUNT */
+ idx = track->num_used - 1;
+ }
+ if ((rs = &track->info[idx])->bucket == bucket) {
+ return rs;
+ }
+ }
+ }
+
+ /* Only create a new read_status_t when asked. */
+ if (!create_rs)
+ return NULL;
+
+ if (track->num_used < TRACK_BUCKET_COUNT) {
+ /* We're still filling up the ring. */
+ ++track->num_used;
+ }
+
+ rs = &track->info[track->next_index];
+ rs->bucket = bucket;
+ rs->last = APR_SUCCESS; /* ### the right initial value? */
+
+ if (++track->next_index == TRACK_BUCKET_COUNT)
+ track->next_index = 0;
+
+ return rs;
+}
+
+#endif /* SERF_DEBUG_BUCKET_USE */
+
+
+apr_status_t serf_debug__record_read(
+ const serf_bucket_t *bucket,
+ apr_status_t status)
+{
+#ifndef SERF_DEBUG_BUCKET_USE
+ return status;
+#else
+
+ track_state_t *track = bucket->allocator->track;
+ read_status_t *rs = find_read_status(track, bucket, 1);
+
+ /* Validate that the previous status value allowed for another read. */
+ if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
+ /* Somebody read when they weren't supposed to. Bail. */
+ abort();
+ }
+
+ /* Save the current status for later. */
+ rs->last = status;
+
+ return status;
+#endif
+}
+
+
+void serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+
+ track_state_t *track = allocator->track;
+ read_status_t *rs = &track->info[0];
+
+ for ( ; track->num_used; --track->num_used, ++rs ) {
+ if (rs->last == APR_SUCCESS) {
+ /* Somebody should have read this bucket again. */
+ abort();
+ }
+
+ /* ### other status values? */
+ }
+
+ /* num_used was reset. also need to reset the next index. */
+ track->next_index = 0;
+
+#endif
+}
+
+
+void serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+
+ /* Just reset the number used so that we don't examine the info[] */
+ allocator->track->num_used = 0;
+ allocator->track->next_index = 0;
+
+#endif
+}
+
+
+void serf_debug__bucket_destroy(const serf_bucket_t *bucket)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+
+ track_state_t *track = bucket->allocator->track;
+ read_status_t *rs = find_read_status(track, bucket, 0);
+
+ if (rs != NULL && rs->last != APR_EOF) {
+ /* The bucket was destroyed before it was read to completion. */
+
+ /* Special exception for socket buckets. If a connection remains
+ * open, they are not read to completion.
+ */
+ if (SERF_BUCKET_IS_SOCKET(bucket))
+ return;
+
+ /* Ditto for SSL Decrypt buckets. */
+ if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
+ return;
+
+ /* Ditto for SSL Encrypt buckets. */
+ if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
+ return;
+
+ /* Ditto for barrier buckets. */
+ if (SERF_BUCKET_IS_BARRIER(bucket))
+ return;
+
+
+ abort();
+ }
+
+#endif
+}
+
+
+void serf_debug__bucket_alloc_check(
+ serf_bucket_alloc_t *allocator)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+ if (allocator->num_alloc != 0) {
+ abort();
+ }
+#endif
+}
+
diff --git a/buckets/barrier_buckets.c b/buckets/barrier_buckets.c
new file mode 100644
index 0000000..eb410ee
--- /dev/null
+++ b/buckets/barrier_buckets.c
@@ -0,0 +1,97 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ serf_bucket_t *stream;
+} barrier_context_t;
+
+
+serf_bucket_t *serf_bucket_barrier_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ barrier_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+
+ return serf_bucket_create(&serf_bucket_type_barrier, allocator, ctx);
+}
+
+static apr_status_t serf_barrier_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_read(ctx->stream, requested, data, len);
+}
+
+static apr_status_t serf_barrier_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size, struct iovec *vecs,
+ int *vecs_used)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_read_iovec(ctx->stream, requested, vecs_size, vecs,
+ vecs_used);
+}
+
+static apr_status_t serf_barrier_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_readline(ctx->stream, acceptable, found, data, len);
+}
+
+static apr_status_t serf_barrier_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ barrier_context_t *ctx = bucket->data;
+
+ return serf_bucket_peek(ctx->stream, data, len);
+}
+
+static void serf_barrier_destroy(serf_bucket_t *bucket)
+{
+    /* The intent of this bucket is to keep the wrapped bucket from being
+     * destroyed. */
+
+    /* Our options are to go ahead and 'eat' this barrier bucket now, or to
+     * ignore the deletion entirely. We do the former: only the barrier
+     * itself is freed; the wrapped bucket is left untouched.
+     */
+ serf_default_destroy_and_data(bucket);
+}
+
+const serf_bucket_type_t serf_bucket_type_barrier = {
+ "BARRIER",
+ serf_barrier_read,
+ serf_barrier_readline,
+ serf_barrier_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_barrier_peek,
+ serf_barrier_destroy,
+};
diff --git a/buckets/buckets.c b/buckets/buckets.c
new file mode 100644
index 0000000..29175ff
--- /dev/null
+++ b/buckets/buckets.c
@@ -0,0 +1,538 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+serf_bucket_t *serf_bucket_create(
+ const serf_bucket_type_t *type,
+ serf_bucket_alloc_t *allocator,
+ void *data)
+{
+ serf_bucket_t *bkt = serf_bucket_mem_alloc(allocator, sizeof(*bkt));
+
+ bkt->type = type;
+ bkt->data = data;
+ bkt->allocator = allocator;
+
+ return bkt;
+}
+
+
+apr_status_t serf_default_read_iovec(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ const char *data;
+ apr_size_t len;
+
+ /* Read some data from the bucket.
+ *
+ * Because we're an internal 'helper' to the bucket, we can't call the
+ * normal serf_bucket_read() call because the debug allocator tracker will
+ * end up marking the bucket as read *twice* - once for us and once for
+ * our caller - which is reading the same bucket. This leads to premature
+ * abort()s if we ever see EAGAIN. Instead, we'll go directly to the
+ * vtable and bypass the debug tracker.
+ */
+ apr_status_t status = bucket->type->read(bucket, requested, &data, &len);
+
+ /* assert that vecs_size >= 1 ? */
+
+ /* Return that data as a single iovec. */
+ if (len) {
+ vecs[0].iov_base = (void *)data; /* loses the 'const' */
+ vecs[0].iov_len = len;
+ *vecs_used = 1;
+ }
+ else {
+ *vecs_used = 0;
+ }
+
+ return status;
+}
+
+
+apr_status_t serf_default_read_for_sendfile(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ apr_hdtr_t *hdtr,
+ apr_file_t **file,
+ apr_off_t *offset,
+ apr_size_t *len)
+{
+ /* Read a bunch of stuff into the headers.
+ *
+ * See serf_default_read_iovec as to why we call into the vtable
+ * directly.
+ */
+ apr_status_t status = bucket->type->read_iovec(bucket, requested,
+ hdtr->numheaders,
+ hdtr->headers,
+ &hdtr->numheaders);
+
+ /* There isn't a file, and there are no trailers. */
+ *file = NULL;
+ hdtr->numtrailers = 0;
+
+ return status;
+}
+
+
+serf_bucket_t *serf_default_read_bucket(
+ serf_bucket_t *bucket,
+ const serf_bucket_type_t *type)
+{
+ return NULL;
+}
+
+
+void serf_default_destroy(serf_bucket_t *bucket)
+{
+#ifdef SERF_DEBUG_BUCKET_USE
+ serf_debug__bucket_destroy(bucket);
+#endif
+
+ serf_bucket_mem_free(bucket->allocator, bucket);
+}
+
+
+void serf_default_destroy_and_data(serf_bucket_t *bucket)
+{
+ serf_bucket_mem_free(bucket->allocator, bucket->data);
+ serf_default_destroy(bucket);
+}
+
+
+/* ==================================================================== */
+
+
+char *serf_bstrmemdup(serf_bucket_alloc_t *allocator,
+ const char *str,
+ apr_size_t size)
+{
+ char *newstr = serf_bucket_mem_alloc(allocator, size + 1);
+ memcpy(newstr, str, size);
+ newstr[size] = '\0';
+ return newstr;
+}
+
+
+void *serf_bmemdup(serf_bucket_alloc_t *allocator,
+ const void *mem,
+ apr_size_t size)
+{
+ void *newmem = serf_bucket_mem_alloc(allocator, size);
+ memcpy(newmem, mem, size);
+ return newmem;
+}
+
+
+char *serf_bstrdup(serf_bucket_alloc_t *allocator,
+ const char *str)
+{
+ apr_size_t size = strlen(str) + 1;
+ char *newstr = serf_bucket_mem_alloc(allocator, size);
+ memcpy(newstr, str, size);
+ return newstr;
+}
+
+
+/* ==================================================================== */
+
+
+static void find_crlf(const char **data, apr_size_t *len, int *found)
+{
+ const char *start = *data;
+ const char *end = start + *len;
+
+ while (start < end) {
+ const char *cr = memchr(start, '\r', *len);
+
+ if (cr == NULL) {
+ break;
+ }
+ ++cr;
+
+ if (cr < end && cr[0] == '\n') {
+ *len -= cr + 1 - start;
+ *data = cr + 1;
+ *found = SERF_NEWLINE_CRLF;
+ return;
+ }
+ if (cr == end) {
+ *len = 0;
+ *data = end;
+ *found = SERF_NEWLINE_CRLF_SPLIT;
+ return;
+ }
+
+ /* It was a bare CR without an LF. Just move past it. */
+ *len -= cr - start;
+ start = cr;
+ }
+
+ *data = start + *len;
+ *len -= *data - start;
+ *found = SERF_NEWLINE_NONE;
+}
+
+
+void serf_util_readline(
+ const char **data,
+ apr_size_t *len,
+ int acceptable,
+ int *found)
+{
+ const char *start;
+ const char *cr;
+ const char *lf;
+ int want_cr;
+ int want_crlf;
+ int want_lf;
+
+ /* If _only_ CRLF is acceptable, then the scanning needs a loop to
+ * skip false hits on CR characters. Use a separate function.
+ */
+ if (acceptable == SERF_NEWLINE_CRLF) {
+ find_crlf(data, len, found);
+ return;
+ }
+
+ start = *data;
+ cr = lf = NULL;
+ want_cr = acceptable & SERF_NEWLINE_CR;
+ want_crlf = acceptable & SERF_NEWLINE_CRLF;
+ want_lf = acceptable & SERF_NEWLINE_LF;
+
+ if (want_cr || want_crlf) {
+ cr = memchr(start, '\r', *len);
+ }
+ if (want_lf) {
+ lf = memchr(start, '\n', *len);
+ }
+
+ if (cr != NULL) {
+ if (lf != NULL) {
+ if (cr + 1 == lf)
+ *found = want_crlf ? SERF_NEWLINE_CRLF : SERF_NEWLINE_CR;
+ else if (want_cr && cr < lf)
+ *found = SERF_NEWLINE_CR;
+ else
+ *found = SERF_NEWLINE_LF;
+ }
+ else if (cr == start + *len - 1) {
+ /* the CR occurred in the last byte of the buffer. this could be
+ * a CRLF split across the data boundary.
+ * ### FIX THIS LOGIC? does caller need to detect?
+ */
+ *found = want_crlf ? SERF_NEWLINE_CRLF_SPLIT : SERF_NEWLINE_CR;
+ }
+ else if (want_cr)
+ *found = SERF_NEWLINE_CR;
+ else /* want_crlf */
+ *found = SERF_NEWLINE_NONE;
+ }
+ else if (lf != NULL)
+ *found = SERF_NEWLINE_LF;
+ else
+ *found = SERF_NEWLINE_NONE;
+
+ switch (*found) {
+ case SERF_NEWLINE_LF:
+ *data = lf + 1;
+ break;
+ case SERF_NEWLINE_CR:
+ case SERF_NEWLINE_CRLF:
+ case SERF_NEWLINE_CRLF_SPLIT:
+ *data = cr + 1 + (*found == SERF_NEWLINE_CRLF);
+ break;
+ case SERF_NEWLINE_NONE:
+ *data += *len;
+ break;
+ default:
+ /* Not reachable */
+ return;
+ }
+
+ *len -= *data - start;
+}
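+
+/* Editor's usage sketch (illustrative only; 'buf' is a hypothetical buffer):
+ *
+ *   const char *buf = "HTTP/1.1 200 OK\r\nServer: x\r\n";
+ *   const char *data = buf;
+ *   apr_size_t len = strlen(buf);
+ *   int found;
+ *
+ *   serf_util_readline(&data, &len, SERF_NEWLINE_CRLF, &found);
+ *
+ * Afterwards 'found' is SERF_NEWLINE_CRLF, 'data' points just past the first
+ * CRLF (at "Server: ..."), 'len' has been reduced accordingly, and the
+ * consumed line runs from 'buf' up to the new 'data'.
+ */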
+
+
+/* ==================================================================== */
+
+
+void serf_databuf_init(serf_databuf_t *databuf)
+{
+ /* nothing is sitting in the buffer */
+ databuf->remaining = 0;
+
+ /* avoid thinking we have hit EOF */
+ databuf->status = APR_SUCCESS;
+}
+
+/* Ensure the buffer is prepared for reading. Will return APR_SUCCESS,
+ * APR_EOF, or some failure code. *len is only set for EOF. */
+static apr_status_t common_databuf_prep(serf_databuf_t *databuf,
+ apr_size_t *len)
+{
+ apr_size_t readlen;
+ apr_status_t status;
+
+ /* if there is data in the buffer, then we're happy. */
+ if (databuf->remaining > 0)
+ return APR_SUCCESS;
+
+ /* if we already hit EOF, then keep returning that. */
+ if (APR_STATUS_IS_EOF(databuf->status)) {
+ /* *data = NULL; ?? */
+ *len = 0;
+ return APR_EOF;
+ }
+
+ /* refill the buffer */
+ status = (*databuf->read)(databuf->read_baton, sizeof(databuf->buf),
+ databuf->buf, &readlen);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ databuf->current = databuf->buf;
+ databuf->remaining = readlen;
+ databuf->status = status;
+
+ return APR_SUCCESS;
+}
+
+
+apr_status_t serf_databuf_read(
+ serf_databuf_t *databuf,
+ apr_size_t requested,
+ const char **data,
+ apr_size_t *len)
+{
+ apr_status_t status = common_databuf_prep(databuf, len);
+ if (status)
+ return status;
+
+ /* peg the requested amount to what we have remaining */
+ if (requested == SERF_READ_ALL_AVAIL || requested > databuf->remaining)
+ requested = databuf->remaining;
+
+ /* return the values */
+ *data = databuf->current;
+ *len = requested;
+
+ /* adjust our internal state to note we've consumed some data */
+ databuf->current += requested;
+ databuf->remaining -= requested;
+
+ /* If we read everything, then we need to return whatever the data
+     * read returned to us. This is going to be APR_EOF or APR_EAGAIN.
+ * If we have NOT read everything, then return APR_SUCCESS to indicate
+ * that we're ready to return some more if asked.
+ */
+ return databuf->remaining ? APR_SUCCESS : databuf->status;
+}
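+
+/* Editor's consumption sketch (illustrative only; 'process' is a hypothetical
+ * callback). Note that a non-zero return may still deliver data, so the
+ * chunk is handled before the status is examined:
+ *
+ *   const char *data;
+ *   apr_size_t len;
+ *   apr_status_t status;
+ *
+ *   do {
+ *       status = serf_databuf_read(databuf, SERF_READ_ALL_AVAIL,
+ *                                  &data, &len);
+ *       if (SERF_BUCKET_READ_ERROR(status))
+ *           return status;
+ *       if (len)
+ *           process(data, len);
+ *   } while (status == APR_SUCCESS);
+ *
+ * On APR_EAGAIN the caller waits for more data and calls again later; on
+ * APR_EOF the stream is exhausted.
+ */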
+
+
+apr_status_t serf_databuf_readline(
+ serf_databuf_t *databuf,
+ int acceptable,
+ int *found,
+ const char **data,
+ apr_size_t *len)
+{
+ apr_status_t status = common_databuf_prep(databuf, len);
+ if (status)
+ return status;
+
+ /* the returned line will start at the current position. */
+ *data = databuf->current;
+
+ /* read a line from the buffer, and adjust the various pointers. */
+ serf_util_readline(&databuf->current, &databuf->remaining, acceptable,
+ found);
+
+ /* the length matches the amount consumed by the readline */
+ *len = databuf->current - *data;
+
+ /* see serf_databuf_read's return condition */
+ return databuf->remaining ? APR_SUCCESS : databuf->status;
+}
+
+
+apr_status_t serf_databuf_peek(
+ serf_databuf_t *databuf,
+ const char **data,
+ apr_size_t *len)
+{
+ apr_status_t status = common_databuf_prep(databuf, len);
+ if (status)
+ return status;
+
+ /* return everything we have */
+ *data = databuf->current;
+ *len = databuf->remaining;
+
+ /* If the last read returned EOF, then the peek should return the same.
+ * The other possibility in databuf->status is APR_EAGAIN, which we
+ * should never return. Thus, just return APR_SUCCESS for non-EOF cases.
+ */
+ if (APR_STATUS_IS_EOF(databuf->status))
+ return APR_EOF;
+ return APR_SUCCESS;
+}
+
+
+/* ==================================================================== */
+
+
+void serf_linebuf_init(serf_linebuf_t *linebuf)
+{
+ linebuf->state = SERF_LINEBUF_EMPTY;
+ linebuf->used = 0;
+}
+
+
+apr_status_t serf_linebuf_fetch(
+ serf_linebuf_t *linebuf,
+ serf_bucket_t *bucket,
+ int acceptable)
+{
+ /* If we had a complete line, then assume the caller has used it, so
+ * we can now reset the state.
+ */
+ if (linebuf->state == SERF_LINEBUF_READY) {
+ linebuf->state = SERF_LINEBUF_EMPTY;
+
+ /* Reset the line_used, too, so we don't have to test the state
+ * before using this value.
+ */
+ linebuf->used = 0;
+ }
+
+ while (1) {
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ if (linebuf->state == SERF_LINEBUF_CRLF_SPLIT) {
+ /* On the previous read, we received just a CR. The LF might
+ * be present, but the bucket couldn't see it. We need to
+ * examine a single character to determine how to handle the
+ * split CRLF.
+ */
+
+ status = serf_bucket_peek(bucket, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (len > 0) {
+ if (*data == '\n') {
+ /* We saw the second part of CRLF. We don't need to
+ * save that character, so do an actual read to suck
+ * up that character.
+ */
+ /* ### check status */
+ (void) serf_bucket_read(bucket, 1, &data, &len);
+ }
+ /* else:
+ * We saw the first character of the next line. Thus,
+ * the current line is terminated by the CR. Just
+ * ignore whatever we peeked at. The next reader will
+ * see it and handle it as appropriate.
+ */
+
+ /* Whatever was read, the line is now ready for use. */
+ linebuf->state = SERF_LINEBUF_READY;
+ }
+ /* ### we need data. gotta check this char. bail if zero?! */
+ /* else len == 0 */
+
+ /* ### status */
+ }
+ else {
+ int found;
+
+ status = serf_bucket_readline(bucket, acceptable, &found,
+ &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Some bucket types (socket) might need an extra read to find
+ out EOF state, so they'll return no data in that read. This
+ means we're done reading, return what we got. */
+ if (APR_STATUS_IS_EOF(status) && len == 0) {
+ return status;
+ }
+ if (linebuf->used + len > sizeof(linebuf->line)) {
+ /* ### need a "line too long" error */
+ return APR_EGENERAL;
+ }
+
+ /* Note: our logic doesn't change for SERF_LINEBUF_PARTIAL. That
+             * only affects how we fill the buffer. It simply tells our
+             * caller whether the line is ready or not.
+ */
+
+ /* If we didn't see a newline, then we should mark the line
+ * buffer as partially complete.
+ */
+ if (found == SERF_NEWLINE_NONE) {
+ linebuf->state = SERF_LINEBUF_PARTIAL;
+ }
+ else if (found == SERF_NEWLINE_CRLF_SPLIT) {
+ linebuf->state = SERF_LINEBUF_CRLF_SPLIT;
+
+ /* Toss the partial CR. We won't ever need it. */
+ --len;
+ }
+ else {
+ /* We got a newline (of some form). We don't need it
+ * in the line buffer, so back up the length. Then
+ * mark the line as ready.
+ */
+ len -= 1 + (found == SERF_NEWLINE_CRLF);
+
+ linebuf->state = SERF_LINEBUF_READY;
+ }
+
+ /* ### it would be nice to avoid this copy if at all possible,
+               ### and just return a data/len pair to the caller. we're
+ ### keeping it simple for now. */
+ memcpy(&linebuf->line[linebuf->used], data, len);
+ linebuf->used += len;
+ }
+
+ /* If we saw anything besides "success. please read again", then
+ * we should return that status. If the line was completed, then
+ * we should also return.
+ */
+ if (status || linebuf->state == SERF_LINEBUF_READY)
+ return status;
+
+ /* We got APR_SUCCESS and the line buffer is not complete. Let's
+ * loop to read some more data.
+ */
+ }
+ /* NOTREACHED */
+}
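
The databuf above buffers an arbitrary read callback behind a small fixed buffer; the file bucket later in this patch wires it to apr_file_read(). A minimal sketch of driving it directly, assuming serf_databuf_t and its read/read_baton members are the ones declared in serf_bucket_util.h:

#include <string.h>
#include "serf.h"
#include "serf_bucket_util.h"

/* Illustrative baton: feed the databuf from a fixed in-memory string. */
typedef struct {
    const char *src;
    apr_size_t remaining;
} mem_reader_baton_t;

/* Same callback shape the databuf expects (compare file_reader in
 * file_buckets.c later in this patch). */
static apr_status_t mem_reader(void *baton, apr_size_t bufsize,
                               char *buf, apr_size_t *len)
{
    mem_reader_baton_t *b = baton;

    *len = (bufsize < b->remaining) ? bufsize : b->remaining;
    memcpy(buf, b->src, *len);
    b->src += *len;
    b->remaining -= *len;

    return b->remaining ? APR_SUCCESS : APR_EOF;
}

static apr_status_t drain_databuf_sketch(void)
{
    serf_databuf_t databuf;
    mem_reader_baton_t baton = { "hello, databuf\n", 15 };
    apr_status_t status;

    serf_databuf_init(&databuf);
    databuf.read = mem_reader;
    databuf.read_baton = &baton;

    do {
        const char *data;
        apr_size_t len;

        status = serf_databuf_read(&databuf, SERF_READ_ALL_AVAIL,
                                   &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;
        /* ... consume the 'len' bytes at 'data' ... */
    } while (!APR_STATUS_IS_EOF(status));

    return APR_SUCCESS;
}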
diff --git a/buckets/bwtp_buckets.c b/buckets/bwtp_buckets.c
new file mode 100644
index 0000000..7ef3047
--- /dev/null
+++ b/buckets/bwtp_buckets.c
@@ -0,0 +1,596 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+#include <apr_lib.h>
+#include <apr_date.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+#include "serf_bucket_types.h"
+
+#include <stdlib.h>
+
+/* This is an implementation of Bidirectional Web Transfer Protocol (BWTP)
+ * See:
+ * http://bwtp.wikidot.com/
+ */
+
+typedef struct {
+ int channel;
+ int open;
+ int type; /* 0 = header, 1 = message */ /* TODO enum? */
+ const char *phrase;
+ serf_bucket_t *headers;
+
+ char req_line[1000];
+} frame_context_t;
+
+typedef struct {
+ serf_bucket_t *stream;
+ serf_bucket_t *body; /* Pointer to the stream wrapping the body. */
+ serf_bucket_t *headers; /* holds parsed headers */
+
+ enum {
+ STATE_STATUS_LINE, /* reading status line */
+ STATE_HEADERS, /* reading headers */
+ STATE_BODY, /* reading body */
+ STATE_DONE /* we've sent EOF */
+ } state;
+
+ /* Buffer for accumulating a line from the response. */
+ serf_linebuf_t linebuf;
+
+ int type; /* 0 = header, 1 = message */ /* TODO enum? */
+ int channel;
+ char *phrase;
+ apr_size_t length;
+} incoming_context_t;
+
+
+serf_bucket_t *serf_bucket_bwtp_channel_close(
+ int channel,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 0;
+ ctx->open = 0;
+ ctx->channel = channel;
+ ctx->phrase = "CLOSED";
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_bwtp_channel_open(
+ int channel,
+ const char *uri,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 0;
+ ctx->open = 1;
+ ctx->channel = channel;
+ ctx->phrase = uri;
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_bwtp_header_create(
+ int channel,
+ const char *phrase,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 0;
+ ctx->open = 0;
+ ctx->channel = channel;
+ ctx->phrase = phrase;
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_bwtp_message_create(
+ int channel,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator)
+{
+ frame_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->type = 1;
+ ctx->open = 0;
+ ctx->channel = channel;
+ ctx->phrase = "MESSAGE";
+ ctx->headers = serf_bucket_headers_create(allocator);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_frame, allocator, ctx);
+}
+
+int serf_bucket_bwtp_frame_get_channel(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->channel;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->channel;
+ }
+
+ return -1;
+}
+
+int serf_bucket_bwtp_frame_get_type(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->type;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->type;
+ }
+
+ return -1;
+}
+
+const char *serf_bucket_bwtp_frame_get_phrase(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->phrase;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->phrase;
+ }
+
+ return NULL;
+}
+
+serf_bucket_t *serf_bucket_bwtp_frame_get_headers(
+ serf_bucket_t *bucket)
+{
+ if (SERF_BUCKET_IS_BWTP_FRAME(bucket)) {
+ frame_context_t *ctx = bucket->data;
+
+ return ctx->headers;
+ }
+ else if (SERF_BUCKET_IS_BWTP_INCOMING_FRAME(bucket)) {
+ incoming_context_t *ctx = bucket->data;
+
+ return ctx->headers;
+ }
+
+ return NULL;
+}
+
+static int count_size(void *baton, const char *key, const char *value)
+{
+ apr_size_t *c = baton;
+ /* TODO Deal with folding. Yikes. */
+
+ /* Add in ": " and CRLF - so an extra four bytes. */
+ *c += strlen(key) + strlen(value) + 4;
+
+ return 0;
+}
+
+static apr_size_t calc_header_size(serf_bucket_t *hdrs)
+{
+ apr_size_t size = 0;
+
+ serf_bucket_headers_do(hdrs, count_size, &size);
+
+ return size;
+}
+
+static void serialize_data(serf_bucket_t *bucket)
+{
+ frame_context_t *ctx = bucket->data;
+ serf_bucket_t *new_bucket;
+ apr_size_t req_len;
+
+ /* Serialize the request-line and headers into one mother string,
+ * and wrap a bucket around it.
+ */
+ req_len = apr_snprintf(ctx->req_line, sizeof(ctx->req_line),
+ "%s %d " "%" APR_UINT64_T_HEX_FMT " %s%s\r\n",
+ (ctx->type ? "BWM" : "BWH"),
+ ctx->channel, calc_header_size(ctx->headers),
+ (ctx->open ? "OPEN " : ""),
+ ctx->phrase);
+ new_bucket = serf_bucket_simple_copy_create(ctx->req_line, req_len,
+ bucket->allocator);
+
+ /* Build up the new bucket structure.
+ *
+ * Note that self needs to become an aggregate bucket so that a
+ * pointer to self still represents the "right" data.
+ */
+ serf_bucket_aggregate_become(bucket);
+
+ /* Insert the two buckets. */
+ serf_bucket_aggregate_append(bucket, new_bucket);
+ serf_bucket_aggregate_append(bucket, ctx->headers);
+
+ /* Our private context is no longer needed, and is not referred to by
+ * any existing bucket. Toss it.
+ */
+ serf_bucket_mem_free(bucket->allocator, ctx);
+}
+
+static apr_status_t serf_bwtp_frame_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read(bucket, requested, data, len);
+}
+
+static apr_status_t serf_bwtp_frame_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the readline. */
+ return serf_bucket_readline(bucket, acceptable, found, data, len);
+}
+
+static apr_status_t serf_bwtp_frame_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read_iovec(bucket, requested,
+ vecs_size, vecs, vecs_used);
+}
+
+static apr_status_t serf_bwtp_frame_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the peek. */
+ return serf_bucket_peek(bucket, data, len);
+}
+
+const serf_bucket_type_t serf_bucket_type_bwtp_frame = {
+ "BWTP-FRAME",
+ serf_bwtp_frame_read,
+ serf_bwtp_frame_readline,
+ serf_bwtp_frame_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_bwtp_frame_peek,
+ serf_default_destroy_and_data,
+};
+
+
+serf_bucket_t *serf_bucket_bwtp_incoming_frame_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ incoming_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->body = NULL;
+ ctx->headers = serf_bucket_headers_create(allocator);
+ ctx->state = STATE_STATUS_LINE;
+ ctx->length = 0;
+ ctx->channel = -1;
+ ctx->phrase = NULL;
+
+ serf_linebuf_init(&ctx->linebuf);
+
+ return serf_bucket_create(&serf_bucket_type_bwtp_incoming_frame, allocator, ctx);
+}
+
+static void bwtp_incoming_destroy_and_data(serf_bucket_t *bucket)
+{
+ incoming_context_t *ctx = bucket->data;
+
+ if (ctx->state != STATE_STATUS_LINE && ctx->phrase) {
+ serf_bucket_mem_free(bucket->allocator, (void*)ctx->phrase);
+ }
+
+ serf_bucket_destroy(ctx->stream);
+ if (ctx->body != NULL)
+ serf_bucket_destroy(ctx->body);
+ serf_bucket_destroy(ctx->headers);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t fetch_line(incoming_context_t *ctx, int acceptable)
+{
+ return serf_linebuf_fetch(&ctx->linebuf, ctx->stream, acceptable);
+}
+
+static apr_status_t parse_status_line(incoming_context_t *ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ int res;
+ char *reason; /* ### stupid APR interface makes this non-const */
+
+ /* ctx->linebuf.line should be of form: BW* */
+ res = apr_date_checkmask(ctx->linebuf.line, "BW*");
+ if (!res) {
+        /* Not a BWTP response? Well, at least we won't understand it. */
+ return APR_EGENERAL;
+ }
+
+ if (ctx->linebuf.line[2] == 'H') {
+ ctx->type = 0;
+ }
+ else if (ctx->linebuf.line[2] == 'M') {
+ ctx->type = 1;
+ }
+ else {
+ ctx->type = -1;
+ }
+
+ ctx->channel = apr_strtoi64(ctx->linebuf.line + 3, &reason, 16);
+
+    /* Skip the space between the channel and the length. */
+ if (apr_isspace(*reason)) {
+ reason++;
+ }
+
+ ctx->length = apr_strtoi64(reason, &reason, 16);
+
+ /* Skip leading spaces for the reason string. */
+ if (reason - ctx->linebuf.line < ctx->linebuf.used) {
+ if (apr_isspace(*reason)) {
+ reason++;
+ }
+
+ ctx->phrase = serf_bstrmemdup(allocator, reason,
+ ctx->linebuf.used
+ - (reason - ctx->linebuf.line));
+ } else {
+ ctx->phrase = NULL;
+ }
+
+ return APR_SUCCESS;
+}
+
+/* This code should be replaced with header buckets. */
+static apr_status_t fetch_headers(serf_bucket_t *bkt, incoming_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Something was read. Process it. */
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
+ const char *end_key;
+ const char *c;
+
+ end_key = c = memchr(ctx->linebuf.line, ':', ctx->linebuf.used);
+ if (!c) {
+ /* Bad headers? */
+ return APR_EGENERAL;
+ }
+
+ /* Skip over initial : and spaces. */
+ while (apr_isspace(*++c))
+ continue;
+
+ /* Always copy the headers (from the linebuf into new mem). */
+ /* ### we should be able to optimize some mem copies */
+ serf_bucket_headers_setx(
+ ctx->headers,
+ ctx->linebuf.line, end_key - ctx->linebuf.line, 1,
+ c, ctx->linebuf.line + ctx->linebuf.used - c, 1);
+ }
+
+ return status;
+}
+
+/* Perform one iteration of the state machine.
+ *
+ * Will return when one of the following conditions occurs:
+ * 1) a state change
+ * 2) an error
+ * 3) the stream is not ready or at EOF
+ * 4) APR_SUCCESS, meaning the machine can be run again immediately
+ */
+static apr_status_t run_machine(serf_bucket_t *bkt, incoming_context_t *ctx)
+{
+ apr_status_t status = APR_SUCCESS; /* initialize to avoid gcc warnings */
+
+ switch (ctx->state) {
+ case STATE_STATUS_LINE:
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
+ /* The Status-Line is in the line buffer. Process it. */
+ status = parse_status_line(ctx, bkt->allocator);
+ if (status)
+ return status;
+
+ if (ctx->length) {
+ ctx->body =
+ serf_bucket_barrier_create(ctx->stream, bkt->allocator);
+ ctx->body = serf_bucket_limit_create(ctx->body, ctx->length,
+ bkt->allocator);
+ if (!ctx->type) {
+ ctx->state = STATE_HEADERS;
+ } else {
+ ctx->state = STATE_BODY;
+ }
+ } else {
+ ctx->state = STATE_DONE;
+ }
+ }
+ else {
+ /* The connection closed before we could get the next
+ * response. Treat the request as lost so that our upper
+ * end knows the server never tried to give us a response.
+ */
+ if (APR_STATUS_IS_EOF(status)) {
+ return SERF_ERROR_REQUEST_LOST;
+ }
+ }
+ break;
+ case STATE_HEADERS:
+ status = fetch_headers(ctx->body, ctx);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* If an empty line was read, then we hit the end of the headers.
+ * Move on to the body.
+ */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
+ /* Advance the state. */
+ ctx->state = STATE_DONE;
+ }
+ break;
+ case STATE_BODY:
+ /* Don't do anything. */
+ break;
+ case STATE_DONE:
+ return APR_EOF;
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+
+ return status;
+}
+
+static apr_status_t wait_for_body(serf_bucket_t *bkt, incoming_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* Keep reading and moving through states if we aren't at the BODY */
+ while (ctx->state != STATE_BODY) {
+ status = run_machine(bkt, ctx);
+
+ /* Anything other than APR_SUCCESS means that we cannot immediately
+ * read again (for now).
+ */
+ if (status)
+ return status;
+ }
+ /* in STATE_BODY */
+
+ return APR_SUCCESS;
+}
+
+apr_status_t serf_bucket_bwtp_incoming_frame_wait_for_headers(
+ serf_bucket_t *bucket)
+{
+ incoming_context_t *ctx = bucket->data;
+
+ return wait_for_body(bucket, ctx);
+}
+
+static apr_status_t bwtp_incoming_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ incoming_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ /* It's not possible to have read anything yet! */
+ if (APR_STATUS_IS_EOF(rv) || APR_STATUS_IS_EAGAIN(rv)) {
+ *len = 0;
+ }
+ return rv;
+ }
+
+ rv = serf_bucket_read(ctx->body, requested, data, len);
+ if (APR_STATUS_IS_EOF(rv)) {
+ ctx->state = STATE_DONE;
+ }
+ return rv;
+}
+
+static apr_status_t bwtp_incoming_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ incoming_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ return rv;
+ }
+
+ /* Delegate to the stream bucket to do the readline. */
+ return serf_bucket_readline(ctx->body, acceptable, found, data, len);
+}
+
+/* ### need to implement */
+#define bwtp_incoming_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_bwtp_incoming_frame = {
+ "BWTP-INCOMING",
+ bwtp_incoming_read,
+ bwtp_incoming_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ bwtp_incoming_peek,
+ bwtp_incoming_destroy_and_data,
+};
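
For illustration only, an outgoing BWTP channel-open frame could be assembled as below; the channel number and URI are made-up values and the allocator is assumed to come from the surrounding connection code.

/* Sketch: build a "BWH <channel> <len> OPEN <uri>" frame with one header.
 * The headers are serialized together with the frame line on first read. */
static serf_bucket_t *make_open_frame_sketch(serf_bucket_alloc_t *allocator)
{
    serf_bucket_t *frame, *hdrs;

    frame = serf_bucket_bwtp_channel_open(1, "http://example.com/", allocator);

    hdrs = serf_bucket_bwtp_frame_get_headers(frame);
    serf_bucket_headers_setn(hdrs, "Host", "example.com");

    return frame;
}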
diff --git a/buckets/chunk_buckets.c b/buckets/chunk_buckets.c
new file mode 100644
index 0000000..7f25508
--- /dev/null
+++ b/buckets/chunk_buckets.c
@@ -0,0 +1,235 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ enum {
+ STATE_FETCH,
+ STATE_CHUNK,
+ STATE_EOF
+ } state;
+
+ apr_status_t last_status;
+
+ serf_bucket_t *chunk;
+ serf_bucket_t *stream;
+
+ char chunk_hdr[20];
+} chunk_context_t;
+
+
+serf_bucket_t *serf_bucket_chunk_create(
+ serf_bucket_t *stream, serf_bucket_alloc_t *allocator)
+{
+ chunk_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->state = STATE_FETCH;
+ ctx->chunk = serf_bucket_aggregate_create(allocator);
+ ctx->stream = stream;
+
+ return serf_bucket_create(&serf_bucket_type_chunk, allocator, ctx);
+}
+
+#define CRLF "\r\n"
+
+static apr_status_t create_chunk(serf_bucket_t *bucket)
+{
+ chunk_context_t *ctx = bucket->data;
+ serf_bucket_t *simple_bkt;
+ apr_size_t chunk_len;
+ apr_size_t stream_len;
+ struct iovec vecs[66]; /* 64 + chunk trailer + EOF trailer = 66 */
+ int vecs_read;
+ int i;
+
+ if (ctx->state != STATE_FETCH) {
+ return APR_SUCCESS;
+ }
+
+ ctx->last_status =
+ serf_bucket_read_iovec(ctx->stream, SERF_READ_ALL_AVAIL,
+ 64, vecs, &vecs_read);
+
+ if (SERF_BUCKET_READ_ERROR(ctx->last_status)) {
+ /* Uh-oh. */
+ return ctx->last_status;
+ }
+
+ /* Count the length of the data we read. */
+ stream_len = 0;
+ for (i = 0; i < vecs_read; i++) {
+ stream_len += vecs[i].iov_len;
+ }
+
+ /* assert: stream_len in hex < sizeof(ctx->chunk_hdr) */
+
+ /* Inserting a 0 byte chunk indicates a terminator, which already happens
+ * during the EOF handler below. Adding another one here will cause the
+ * EOF chunk to be interpreted by the server as a new request. So,
+ * we'll only do this if we have something to write.
+ */
+ if (stream_len) {
+ /* Build the chunk header. */
+ chunk_len = apr_snprintf(ctx->chunk_hdr, sizeof(ctx->chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF,
+ (apr_uint64_t)stream_len);
+
+ /* Create a copy of the chunk header so we can have multiple chunks
+ * in the pipeline at the same time.
+ */
+ simple_bkt = serf_bucket_simple_copy_create(ctx->chunk_hdr, chunk_len,
+ bucket->allocator);
+ serf_bucket_aggregate_append(ctx->chunk, simple_bkt);
+
+ /* Insert the chunk footer. */
+ vecs[vecs_read].iov_base = CRLF;
+ vecs[vecs_read++].iov_len = sizeof(CRLF) - 1;
+ }
+
+ /* We've reached the end of the line for the stream. */
+ if (APR_STATUS_IS_EOF(ctx->last_status)) {
+        /* Insert the last-chunk marker ("0" CRLF) and the final CRLF. */
+ vecs[vecs_read].iov_base = "0" CRLF CRLF;
+ vecs[vecs_read++].iov_len = sizeof("0" CRLF CRLF) - 1;
+
+ ctx->state = STATE_EOF;
+ }
+ else {
+ /* Okay, we can return data. */
+ ctx->state = STATE_CHUNK;
+ }
+
+ serf_bucket_aggregate_append_iovec(ctx->chunk, vecs, vecs_read);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_chunk_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ /* Before proceeding, we need to fetch some data from the stream. */
+ if (ctx->state == STATE_FETCH) {
+ status = create_chunk(bucket);
+ if (status) {
+ return status;
+ }
+ }
+
+ status = serf_bucket_read(ctx->chunk, requested, data, len);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = ctx->last_status;
+ ctx->state = STATE_FETCH;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_chunk_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ status = serf_bucket_readline(ctx->chunk, acceptable, found, data, len);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = APR_EAGAIN;
+ ctx->state = STATE_FETCH;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_chunk_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ /* Before proceeding, we need to fetch some data from the stream. */
+ if (ctx->state == STATE_FETCH) {
+ status = create_chunk(bucket);
+ if (status) {
+ return status;
+ }
+ }
+
+ status = serf_bucket_read_iovec(ctx->chunk, requested, vecs_size, vecs,
+ vecs_used);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = ctx->last_status;
+ ctx->state = STATE_FETCH;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_chunk_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ chunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ status = serf_bucket_peek(ctx->chunk, data, len);
+
+ /* Mask EOF from aggregate bucket. */
+ if (APR_STATUS_IS_EOF(status) && ctx->state == STATE_CHUNK) {
+ status = APR_EAGAIN;
+ }
+
+ return status;
+}
+
+static void serf_chunk_destroy(serf_bucket_t *bucket)
+{
+ chunk_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(ctx->stream);
+ serf_bucket_destroy(ctx->chunk);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+const serf_bucket_type_t serf_bucket_type_chunk = {
+ "CHUNK",
+ serf_chunk_read,
+ serf_chunk_readline,
+ serf_chunk_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_chunk_peek,
+ serf_chunk_destroy,
+};
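
A hedged usage sketch for the chunk bucket: wrap any body bucket and readers see it in HTTP/1.1 chunked coding. SERF_BUCKET_SIMPLE_STRING_LEN is the simple-bucket macro also used by the deflate bucket later in this patch; the literal body is purely illustrative.

static serf_bucket_t *chunk_body_sketch(serf_bucket_alloc_t *allocator)
{
    serf_bucket_t *body, *chunked;

    /* Any bucket works as the body; a 17-byte literal keeps this short. */
    body = SERF_BUCKET_SIMPLE_STRING_LEN("some request body", 17, allocator);
    chunked = serf_bucket_chunk_create(body, allocator);

    /* Reads now yield a hex length line, the data, a CRLF, and finally
     * the terminating "0" CRLF CRLF once 'body' reports EOF. */
    return chunked;
}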
diff --git a/buckets/dechunk_buckets.c b/buckets/dechunk_buckets.c
new file mode 100644
index 0000000..28dd671
--- /dev/null
+++ b/buckets/dechunk_buckets.c
@@ -0,0 +1,185 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+typedef struct {
+ serf_bucket_t *stream;
+
+ enum {
+ STATE_SIZE, /* reading the chunk size */
+ STATE_CHUNK, /* reading the chunk */
+ STATE_TERM, /* reading the chunk terminator */
+ STATE_DONE /* body is done; we've returned EOF */
+ } state;
+
+ /* Buffer for accumulating a chunk size. */
+ serf_linebuf_t linebuf;
+
+ /* How much of the chunk, or the terminator, do we have left to read? */
+ apr_int64_t body_left;
+
+} dechunk_context_t;
+
+
+serf_bucket_t *serf_bucket_dechunk_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ dechunk_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->state = STATE_SIZE;
+
+ serf_linebuf_init(&ctx->linebuf);
+
+ return serf_bucket_create(&serf_bucket_type_dechunk, allocator, ctx);
+}
+
+static void serf_dechunk_destroy_and_data(serf_bucket_t *bucket)
+{
+ dechunk_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(ctx->stream);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t serf_dechunk_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ dechunk_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ while (1) {
+ switch (ctx->state) {
+ case STATE_SIZE:
+
+ /* fetch a line terminated by CRLF */
+ status = serf_linebuf_fetch(&ctx->linebuf, ctx->stream,
+ SERF_NEWLINE_CRLF);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* if a line was read, then parse it. */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY) {
+ /* NUL-terminate the line. if it filled the entire buffer,
+ then just assume the thing is too large. */
+ if (ctx->linebuf.used == sizeof(ctx->linebuf.line))
+ return APR_FROM_OS_ERROR(ERANGE);
+ ctx->linebuf.line[ctx->linebuf.used] = '\0';
+
+ /* convert from HEX digits. */
+ ctx->body_left = apr_strtoi64(ctx->linebuf.line, NULL, 16);
+ if (errno == ERANGE) {
+ return APR_FROM_OS_ERROR(ERANGE);
+ }
+
+ if (ctx->body_left == 0) {
+ /* Just read the last-chunk marker. We're DONE. */
+ ctx->state = STATE_DONE;
+ status = APR_EOF;
+ }
+ else {
+ /* Got a size, so we'll start reading the chunk now. */
+ ctx->state = STATE_CHUNK;
+ }
+
+ /* If we can read more, then go do so. */
+ if (!status)
+ continue;
+ }
+ /* assert: status != 0 */
+
+ /* Note that we didn't actually read anything, so our callers
+ * don't get confused.
+ */
+ *len = 0;
+
+ return status;
+
+ case STATE_CHUNK:
+
+ if (requested > ctx->body_left) {
+ requested = ctx->body_left;
+ }
+
+ /* Delegate to the stream bucket to do the read. */
+ status = serf_bucket_read(ctx->stream, requested, data, len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* Some data was read, so decrement the amount left and see
+ * if we're done reading this chunk.
+ */
+ ctx->body_left -= *len;
+ if (!ctx->body_left) {
+ ctx->state = STATE_TERM;
+ ctx->body_left = 2; /* CRLF */
+ }
+
+ /* Return the data we just read. */
+ return status;
+
+ case STATE_TERM:
+ /* Delegate to the stream bucket to do the read. */
+ status = serf_bucket_read(ctx->stream, ctx->body_left, data, len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* Some data was read, so decrement the amount left and see
+ * if we're done reading the chunk terminator.
+ */
+ ctx->body_left -= *len;
+ if (!ctx->body_left) {
+ ctx->state = STATE_SIZE;
+ }
+
+ if (status)
+ return status;
+
+ break;
+
+ case STATE_DONE:
+ /* Just keep returning EOF */
+ return APR_EOF;
+
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+ }
+ /* NOTREACHED */
+}
+
+/* ### need to implement */
+#define serf_dechunk_readline NULL
+#define serf_dechunk_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_dechunk = {
+ "DECHUNK",
+ serf_dechunk_read,
+ serf_dechunk_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_dechunk_peek,
+ serf_dechunk_destroy_and_data,
+};
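
Conversely, a sketch of decoding a chunked stream with the dechunk bucket; 'raw' stands in for whatever bucket delivers the still-chunked wire data, and cleanup is omitted.

static apr_status_t read_dechunked_sketch(serf_bucket_t *raw,
                                          serf_bucket_alloc_t *allocator)
{
    serf_bucket_t *body = serf_bucket_dechunk_create(raw, allocator);

    while (1) {
        const char *data;
        apr_size_t len;
        apr_status_t status;

        status = serf_bucket_read(body, SERF_READ_ALL_AVAIL, &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* ... consume the 'len' de-chunked bytes at 'data' ... */

        if (status)      /* APR_EOF or APR_EAGAIN: hand back to the caller */
            return status;
    }
}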
diff --git a/buckets/deflate_buckets.c b/buckets/deflate_buckets.c
new file mode 100644
index 0000000..7a8e8e4
--- /dev/null
+++ b/buckets/deflate_buckets.c
@@ -0,0 +1,384 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_strings.h>
+
+#include <zlib.h>
+
+/* This conditional isn't defined anywhere yet. */
+#ifdef HAVE_ZUTIL_H
+#include <zutil.h>
+#endif
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+/* magic header */
+static char deflate_magic[2] = { '\037', '\213' };
+#define DEFLATE_MAGIC_SIZE 10
+#define DEFLATE_VERIFY_SIZE 8
+#define DEFLATE_BUFFER_SIZE 8096
+
+static const int DEFLATE_WINDOW_SIZE = -15;
+static const int DEFLATE_MEMLEVEL = 9;
+
+typedef struct {
+ serf_bucket_t *stream;
+ serf_bucket_t *inflate_stream;
+
+ int format; /* Are we 'deflate' or 'gzip'? */
+
+ enum {
+ STATE_READING_HEADER, /* reading the gzip header */
+ STATE_HEADER, /* read the gzip header */
+ STATE_INIT, /* init'ing zlib functions */
+ STATE_INFLATE, /* inflating the content now */
+ STATE_READING_VERIFY, /* reading the final gzip CRC */
+ STATE_VERIFY, /* verifying the final gzip CRC */
+ STATE_FINISH, /* clean up after reading body */
+ STATE_DONE, /* body is done; we'll return EOF here */
+ } state;
+
+ z_stream zstream;
+ char hdr_buffer[DEFLATE_MAGIC_SIZE];
+ unsigned char buffer[DEFLATE_BUFFER_SIZE];
+ unsigned long crc;
+ int windowSize;
+ int memLevel;
+ int bufferSize;
+
+ /* How much of the chunk, or the terminator, do we have left to read? */
+ apr_size_t stream_left;
+
+ /* How much are we supposed to read? */
+ apr_size_t stream_size;
+
+ int stream_status; /* What was the last status we read? */
+
+} deflate_context_t;
+
+/* Inputs a string and returns a long. */
+static unsigned long getLong(unsigned char *string)
+{
+ return ((unsigned long)string[0])
+ | (((unsigned long)string[1]) << 8)
+ | (((unsigned long)string[2]) << 16)
+ | (((unsigned long)string[3]) << 24);
+}
+
+serf_bucket_t *serf_bucket_deflate_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator,
+ int format)
+{
+ deflate_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->stream_status = APR_SUCCESS;
+ ctx->inflate_stream = serf_bucket_aggregate_create(allocator);
+ ctx->format = format;
+ ctx->crc = 0;
+ /* zstream must be NULL'd out. */
+ memset(&ctx->zstream, 0, sizeof(ctx->zstream));
+
+ switch (ctx->format) {
+ case SERF_DEFLATE_GZIP:
+ ctx->state = STATE_READING_HEADER;
+ break;
+ case SERF_DEFLATE_DEFLATE:
+ /* deflate doesn't have a header. */
+ ctx->state = STATE_INIT;
+ break;
+ default:
+ /* Not reachable */
+ return NULL;
+ }
+
+ /* Initial size of gzip header. */
+ ctx->stream_left = ctx->stream_size = DEFLATE_MAGIC_SIZE;
+
+ ctx->windowSize = DEFLATE_WINDOW_SIZE;
+ ctx->memLevel = DEFLATE_MEMLEVEL;
+ ctx->bufferSize = DEFLATE_BUFFER_SIZE;
+
+ return serf_bucket_create(&serf_bucket_type_deflate, allocator, ctx);
+}
+
+static void serf_deflate_destroy_and_data(serf_bucket_t *bucket)
+{
+ deflate_context_t *ctx = bucket->data;
+
+ if (ctx->state > STATE_INIT &&
+ ctx->state <= STATE_FINISH)
+ inflateEnd(&ctx->zstream);
+
+ /* We may have appended inflate_stream into the stream bucket.
+ * If so, avoid free'ing it twice.
+ */
+ if (ctx->inflate_stream) {
+ serf_bucket_destroy(ctx->inflate_stream);
+ }
+ serf_bucket_destroy(ctx->stream);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t serf_deflate_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ deflate_context_t *ctx = bucket->data;
+ unsigned long compCRC, compLen;
+ apr_status_t status;
+ const char *private_data;
+ apr_size_t private_len;
+ int zRC;
+
+ while (1) {
+ switch (ctx->state) {
+ case STATE_READING_HEADER:
+ case STATE_READING_VERIFY:
+ status = serf_bucket_read(ctx->stream, ctx->stream_left,
+ &private_data, &private_len);
+
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ memcpy(ctx->hdr_buffer + (ctx->stream_size - ctx->stream_left),
+ private_data, private_len);
+
+ ctx->stream_left -= private_len;
+
+ if (ctx->stream_left == 0) {
+ ctx->state++;
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ *len = 0;
+ return status;
+ }
+ }
+ else if (status) {
+ *len = 0;
+ return status;
+ }
+ break;
+ case STATE_HEADER:
+ if (ctx->hdr_buffer[0] != deflate_magic[0] ||
+ ctx->hdr_buffer[1] != deflate_magic[1]) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ if (ctx->hdr_buffer[3] != 0) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ ctx->state++;
+ break;
+ case STATE_VERIFY:
+ /* Do the checksum computation. */
+ compCRC = getLong((unsigned char*)ctx->hdr_buffer);
+ if (ctx->crc != compCRC) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ compLen = getLong((unsigned char*)ctx->hdr_buffer + 4);
+ if (ctx->zstream.total_out != compLen) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ ctx->state++;
+ break;
+ case STATE_INIT:
+ zRC = inflateInit2(&ctx->zstream, ctx->windowSize);
+ if (zRC != Z_OK) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ ctx->zstream.next_out = ctx->buffer;
+ ctx->zstream.avail_out = ctx->bufferSize;
+ ctx->state++;
+ break;
+ case STATE_FINISH:
+ inflateEnd(&ctx->zstream);
+ serf_bucket_aggregate_prepend(ctx->stream, ctx->inflate_stream);
+ ctx->inflate_stream = 0;
+ ctx->state++;
+ break;
+ case STATE_INFLATE:
+ /* Do we have anything already uncompressed to read? */
+ status = serf_bucket_read(ctx->inflate_stream, requested, data,
+ len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Hide EOF. */
+ if (APR_STATUS_IS_EOF(status)) {
+ status = ctx->stream_status;
+ if (APR_STATUS_IS_EOF(status)) {
+ /* We've read all of the data from our stream, but we
+ * need to continue to iterate until we flush
+ * out the zlib buffer.
+ */
+ status = APR_SUCCESS;
+ }
+ }
+ if (*len != 0) {
+ return status;
+ }
+
+ /* We tried; but we have nothing buffered. Fetch more. */
+
+ /* It is possible that we maxed out avail_out before
+ * exhausting avail_in; therefore, continue using the
+ * previous buffer. Otherwise, fetch more data from
+ * our stream bucket.
+ */
+ if (ctx->zstream.avail_in == 0) {
+ /* When we empty our inflated stream, we'll return this
+                 * status - this allows us to eventually pass up EAGAINs.
+ */
+ ctx->stream_status = serf_bucket_read(ctx->stream,
+ ctx->bufferSize,
+ &private_data,
+ &private_len);
+
+ if (SERF_BUCKET_READ_ERROR(ctx->stream_status)) {
+ return ctx->stream_status;
+ }
+
+ if (!private_len && APR_STATUS_IS_EAGAIN(ctx->stream_status)) {
+ *len = 0;
+ status = ctx->stream_status;
+ ctx->stream_status = APR_SUCCESS;
+ return status;
+ }
+
+ ctx->zstream.next_in = (unsigned char*)private_data;
+ ctx->zstream.avail_in = private_len;
+ }
+ zRC = Z_OK;
+ while (ctx->zstream.avail_in != 0) {
+ /* We're full, clear out our buffer, reset, and return. */
+ if (ctx->zstream.avail_out == 0) {
+ serf_bucket_t *tmp;
+ ctx->zstream.next_out = ctx->buffer;
+ private_len = ctx->bufferSize - ctx->zstream.avail_out;
+
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer,
+ private_len);
+
+ /* FIXME: There probably needs to be a free func. */
+ tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer,
+ private_len,
+ bucket->allocator);
+ serf_bucket_aggregate_append(ctx->inflate_stream, tmp);
+ ctx->zstream.avail_out = ctx->bufferSize;
+ break;
+ }
+ zRC = inflate(&ctx->zstream, Z_NO_FLUSH);
+
+ if (zRC == Z_STREAM_END) {
+ serf_bucket_t *tmp;
+
+ private_len = ctx->bufferSize - ctx->zstream.avail_out;
+ ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer,
+ private_len);
+ /* FIXME: There probably needs to be a free func. */
+ tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer,
+ private_len,
+ bucket->allocator);
+ serf_bucket_aggregate_append(ctx->inflate_stream, tmp);
+
+ ctx->zstream.avail_out = ctx->bufferSize;
+
+ /* Push back the remaining data to be read. */
+ tmp = serf_bucket_aggregate_create(bucket->allocator);
+ serf_bucket_aggregate_prepend(tmp, ctx->stream);
+ ctx->stream = tmp;
+
+ /* We now need to take the remaining avail_in and
+ * throw it in ctx->stream so our next read picks it up.
+ */
+ tmp = SERF_BUCKET_SIMPLE_STRING_LEN(
+ (const char*)ctx->zstream.next_in,
+ ctx->zstream.avail_in,
+ bucket->allocator);
+ serf_bucket_aggregate_prepend(ctx->stream, tmp);
+
+ switch (ctx->format) {
+ case SERF_DEFLATE_GZIP:
+ ctx->stream_left = ctx->stream_size =
+ DEFLATE_VERIFY_SIZE;
+ ctx->state++;
+ break;
+ case SERF_DEFLATE_DEFLATE:
+ /* Deflate does not have a verify footer. */
+ ctx->state = STATE_FINISH;
+ break;
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+
+ break;
+ }
+ if (zRC != Z_OK) {
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ }
+ /* Okay, we've inflated. Try to read. */
+ status = serf_bucket_read(ctx->inflate_stream, requested, data,
+ len);
+ /* Hide EOF. */
+ if (APR_STATUS_IS_EOF(status)) {
+ status = ctx->stream_status;
+ /* If our stream is finished too, return SUCCESS so
+ * we'll iterate one more time.
+ */
+ if (APR_STATUS_IS_EOF(status)) {
+ /* No more data to read from the stream, and everything
+ inflated. If all data was received correctly, state
+ should have been advanced to STATE_READING_VERIFY or
+ STATE_FINISH. If not, then the data was incomplete
+ and we have an error. */
+ if (ctx->state != STATE_INFLATE)
+ return APR_SUCCESS;
+ else
+ return SERF_ERROR_DECOMPRESSION_FAILED;
+ }
+ }
+ return status;
+ case STATE_DONE:
+ /* We're done inflating. Use our finished buffer. */
+ return serf_bucket_read(ctx->stream, requested, data, len);
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+ }
+
+ /* NOTREACHED */
+}
+
+/* ### need to implement */
+#define serf_deflate_readline NULL
+#define serf_deflate_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_deflate = {
+ "DEFLATE",
+ serf_deflate_read,
+ serf_deflate_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_deflate_peek,
+ serf_deflate_destroy_and_data,
+};
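
A short sketch of the typical use: wrap a compressed response body so callers read plain text. SERF_DEFLATE_GZIP and SERF_DEFLATE_DEFLATE are the format constants handled above; 'raw_body' is assumed to carry the compressed bytes.

static serf_bucket_t *gunzip_body_sketch(serf_bucket_t *raw_body,
                                         serf_bucket_alloc_t *allocator)
{
    /* SERF_DEFLATE_GZIP runs the gzip header/CRC states above;
     * SERF_DEFLATE_DEFLATE skips straight to STATE_INIT. */
    return serf_bucket_deflate_create(raw_body, allocator, SERF_DEFLATE_GZIP);
}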
diff --git a/buckets/file_buckets.c b/buckets/file_buckets.c
new file mode 100644
index 0000000..bd41cab
--- /dev/null
+++ b/buckets/file_buckets.c
@@ -0,0 +1,117 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+typedef struct {
+ apr_file_t *file;
+
+ serf_databuf_t databuf;
+
+} file_context_t;
+
+
+static apr_status_t file_reader(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ file_context_t *ctx = baton;
+
+ *len = bufsize;
+ return apr_file_read(ctx->file, buf, len);
+}
+
+serf_bucket_t *serf_bucket_file_create(
+ apr_file_t *file,
+ serf_bucket_alloc_t *allocator)
+{
+ file_context_t *ctx;
+#if APR_HAS_MMAP
+ apr_finfo_t finfo;
+ const char *file_path;
+
+ /* See if we'd be better off mmap'ing this file instead.
+ *
+ * Note that there is a failure case here that we purposely fall through:
+ * if a file is buffered, apr_mmap will reject it. However, on older
+ * versions of APR, we have no way of knowing this - but apr_mmap_create
+ * will check for this and return APR_EBADF.
+ */
+ apr_file_name_get(&file_path, file);
+ apr_stat(&finfo, file_path, APR_FINFO_SIZE,
+ serf_bucket_allocator_get_pool(allocator));
+ if (APR_MMAP_CANDIDATE(finfo.size)) {
+ apr_status_t status;
+ apr_mmap_t *file_mmap;
+ status = apr_mmap_create(&file_mmap, file, 0, finfo.size,
+ APR_MMAP_READ,
+ serf_bucket_allocator_get_pool(allocator));
+
+ if (status == APR_SUCCESS) {
+ return serf_bucket_mmap_create(file_mmap, allocator);
+ }
+ }
+#endif
+
+ /* Oh, well. */
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->file = file;
+
+ serf_databuf_init(&ctx->databuf);
+ ctx->databuf.read = file_reader;
+ ctx->databuf.read_baton = ctx;
+
+ return serf_bucket_create(&serf_bucket_type_file, allocator, ctx);
+}
+
+static apr_status_t serf_file_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ file_context_t *ctx = bucket->data;
+
+ return serf_databuf_read(&ctx->databuf, requested, data, len);
+}
+
+static apr_status_t serf_file_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ file_context_t *ctx = bucket->data;
+
+ return serf_databuf_readline(&ctx->databuf, acceptable, found, data, len);
+}
+
+static apr_status_t serf_file_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ file_context_t *ctx = bucket->data;
+
+ return serf_databuf_peek(&ctx->databuf, data, len);
+}
+
+const serf_bucket_type_t serf_bucket_type_file = {
+ "FILE",
+ serf_file_read,
+ serf_file_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_file_peek,
+ serf_default_destroy_and_data,
+};
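
A sketch of feeding a file through this bucket; apr_file_open() and the APR_READ / APR_OS_DEFAULT flags are standard APR, not part of this patch. Whether the data ends up mmap'ed or routed through the databuf is decided inside serf_bucket_file_create().

#include <apr_file_io.h>

static serf_bucket_t *open_file_bucket_sketch(const char *path,
                                              apr_pool_t *pool,
                                              serf_bucket_alloc_t *allocator)
{
    apr_file_t *file;

    if (apr_file_open(&file, path, APR_READ, APR_OS_DEFAULT, pool))
        return NULL;

    return serf_bucket_file_create(file, allocator);
}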
diff --git a/buckets/headers_buckets.c b/buckets/headers_buckets.c
new file mode 100644
index 0000000..8bf91b4
--- /dev/null
+++ b/buckets/headers_buckets.c
@@ -0,0 +1,429 @@
+/* Copyright 2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr_general.h> /* for strcasecmp() */
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct header_list {
+ const char *header;
+ const char *value;
+
+ apr_size_t header_size;
+ apr_size_t value_size;
+
+ int alloc_flags;
+#define ALLOC_HEADER 0x0001 /* header lives in our allocator */
+#define ALLOC_VALUE 0x0002 /* value lives in our allocator */
+
+ struct header_list *next;
+} header_list_t;
+
+typedef struct {
+ header_list_t *list;
+
+ header_list_t *cur_read;
+ enum {
+ READ_START, /* haven't started reading yet */
+ READ_HEADER, /* reading cur_read->header */
+ READ_SEP, /* reading ": " */
+ READ_VALUE, /* reading cur_read->value */
+ READ_CRLF, /* reading "\r\n" */
+ READ_TERM, /* reading the final "\r\n" */
+ READ_DONE /* no more data to read */
+ } state;
+ apr_size_t amt_read; /* how much of the current state we've read */
+
+} headers_context_t;
+
+
+serf_bucket_t *serf_bucket_headers_create(
+ serf_bucket_alloc_t *allocator)
+{
+ headers_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->list = NULL;
+ ctx->state = READ_START;
+
+ return serf_bucket_create(&serf_bucket_type_headers, allocator, ctx);
+}
+
+void serf_bucket_headers_setx(
+ serf_bucket_t *bkt,
+ const char *header, apr_size_t header_size, int header_copy,
+ const char *value, apr_size_t value_size, int value_copy)
+{
+ headers_context_t *ctx = bkt->data;
+ header_list_t *iter = ctx->list;
+ header_list_t *hdr;
+
+#if 0
+ /* ### include this? */
+ if (ctx->cur_read) {
+ /* we started reading. can't change now. */
+ abort();
+ }
+#endif
+
+ hdr = serf_bucket_mem_alloc(bkt->allocator, sizeof(*hdr));
+ hdr->header_size = header_size;
+ hdr->value_size = value_size;
+ hdr->alloc_flags = 0;
+ hdr->next = NULL;
+
+ if (header_copy) {
+ hdr->header = serf_bstrmemdup(bkt->allocator, header, header_size);
+ hdr->alloc_flags |= ALLOC_HEADER;
+ }
+ else {
+ hdr->header = header;
+ }
+
+ if (value_copy) {
+ hdr->value = serf_bstrmemdup(bkt->allocator, value, value_size);
+ hdr->alloc_flags |= ALLOC_VALUE;
+ }
+ else {
+ hdr->value = value;
+ }
+
+ /* Add the new header at the end of the list. */
+ while (iter && iter->next) {
+ iter = iter->next;
+ }
+ if (iter)
+ iter->next = hdr;
+ else
+ ctx->list = hdr;
+}
+
+void serf_bucket_headers_set(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value)
+{
+ serf_bucket_headers_setx(headers_bucket,
+ header, strlen(header), 0,
+ value, strlen(value), 1);
+}
+
+void serf_bucket_headers_setc(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value)
+{
+ serf_bucket_headers_setx(headers_bucket,
+ header, strlen(header), 1,
+ value, strlen(value), 1);
+}
+
+void serf_bucket_headers_setn(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value)
+{
+ serf_bucket_headers_setx(headers_bucket,
+ header, strlen(header), 0,
+ value, strlen(value), 0);
+}
+
+const char *serf_bucket_headers_get(
+ serf_bucket_t *headers_bucket,
+ const char *header)
+{
+ headers_context_t *ctx = headers_bucket->data;
+ header_list_t *found = ctx->list;
+ const char *val = NULL;
+ int value_size = 0;
+ int val_alloc = 0;
+
+ while (found) {
+ if (strcasecmp(found->header, header) == 0) {
+ if (val) {
+ /* The header is already present. RFC 2616, section 4.2
+ indicates that we should append the new value, separated by
+ a comma. Reasoning: for headers whose values are known to
+ be comma-separated, that is clearly the correct behavior;
+ for others, the correct behavior is undefined anyway. */
+
+ /* The "+1" is for the comma; serf_bstrmemdup() will also add
+ one slot for the terminating '\0'. */
+ apr_size_t new_size = found->value_size + value_size + 1;
+ char *new_val = serf_bucket_mem_alloc(headers_bucket->allocator,
+ new_size);
+ memcpy(new_val, val, value_size);
+ new_val[value_size] = ',';
+ memcpy(new_val + value_size + 1, found->value,
+ found->value_size);
+ new_val[new_size] = '\0';
+ /* Copy the new value over the already existing value. */
+ if (val_alloc)
+ serf_bucket_mem_free(headers_bucket->allocator, (void*)val);
+ val_alloc |= ALLOC_VALUE;
+ val = new_val;
+ value_size = new_size;
+ }
+ else {
+ val = found->value;
+ value_size = found->value_size;
+ }
+ }
+ found = found->next;
+ }
+
+ return val;
+}
+
+void serf_bucket_headers_do(
+ serf_bucket_t *headers_bucket,
+ serf_bucket_headers_do_callback_fn_t func,
+ void *baton)
+{
+ headers_context_t *ctx = headers_bucket->data;
+ header_list_t *scan = ctx->list;
+
+ while (scan) {
+ if (func(baton, scan->header, scan->value) != 0) {
+ break;
+ }
+ scan = scan->next;
+ }
+}
+
+static void serf_headers_destroy_and_data(serf_bucket_t *bucket)
+{
+ headers_context_t *ctx = bucket->data;
+ header_list_t *scan = ctx->list;
+
+ while (scan) {
+ header_list_t *next_hdr = scan->next;
+
+ if (scan->alloc_flags & ALLOC_HEADER)
+ serf_bucket_mem_free(bucket->allocator, (void *)scan->header);
+ if (scan->alloc_flags & ALLOC_VALUE)
+ serf_bucket_mem_free(bucket->allocator, (void *)scan->value);
+ serf_bucket_mem_free(bucket->allocator, scan);
+
+ scan = next_hdr;
+ }
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static void select_value(
+ headers_context_t *ctx,
+ const char **value,
+ apr_size_t *len)
+{
+ const char *v;
+ apr_size_t l;
+
+ if (ctx->state == READ_START) {
+ if (ctx->list == NULL) {
+ /* No headers. Move straight to the TERM state. */
+ ctx->state = READ_TERM;
+ }
+ else {
+ ctx->state = READ_HEADER;
+ ctx->cur_read = ctx->list;
+ }
+ ctx->amt_read = 0;
+ }
+
+ switch (ctx->state) {
+ case READ_HEADER:
+ v = ctx->cur_read->header;
+ l = ctx->cur_read->header_size;
+ break;
+ case READ_SEP:
+ v = ": ";
+ l = 2;
+ break;
+ case READ_VALUE:
+ v = ctx->cur_read->value;
+ l = ctx->cur_read->value_size;
+ break;
+ case READ_CRLF:
+ case READ_TERM:
+ v = "\r\n";
+ l = 2;
+ break;
+ case READ_DONE:
+ *len = 0;
+ return;
+ default:
+ /* Not reachable */
+ return;
+ }
+
+ *value = v + ctx->amt_read;
+ *len = l - ctx->amt_read;
+}
+
+/* the current data chunk has been read/consumed. move our internal state. */
+static apr_status_t consume_chunk(headers_context_t *ctx)
+{
+ /* move to the next state, resetting the amount read. */
+ ++ctx->state;
+ ctx->amt_read = 0;
+
+ /* just sent the terminator and moved to DONE. signal completion. */
+ if (ctx->state == READ_DONE)
+ return APR_EOF;
+
+ /* end of this header. move to the next one. */
+ if (ctx->state == READ_TERM) {
+ ctx->cur_read = ctx->cur_read->next;
+ if (ctx->cur_read != NULL) {
+            /* We've got another header to send. Reset the read state. */
+ ctx->state = READ_HEADER;
+ }
+ /* else leave in READ_TERM */
+ }
+
+ /* there is more data which can be read immediately. */
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_headers_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ headers_context_t *ctx = bucket->data;
+
+ select_value(ctx, data, len);
+
+ /* already done or returning the CRLF terminator? return EOF */
+ if (ctx->state == READ_DONE || ctx->state == READ_TERM)
+ return APR_EOF;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_headers_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ headers_context_t *ctx = bucket->data;
+ apr_size_t avail;
+
+ select_value(ctx, data, &avail);
+ if (ctx->state == READ_DONE)
+ return APR_EOF;
+
+ if (requested >= avail) {
+ /* return everything from this chunk */
+ *len = avail;
+
+ /* we consumed this chunk. advance the state. */
+ return consume_chunk(ctx);
+ }
+
+ /* return just the amount requested, and advance our pointer */
+ *len = requested;
+ ctx->amt_read += requested;
+
+ /* there is more that can be read immediately */
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_headers_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ headers_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ /* ### what behavior should we use here? APR_EGENERAL for now */
+ if ((acceptable & SERF_NEWLINE_CRLF) == 0)
+ return APR_EGENERAL;
+
+ /* get whatever is in this chunk */
+ select_value(ctx, data, len);
+ if (ctx->state == READ_DONE)
+ return APR_EOF;
+
+ /* we consumed this chunk. advance the state. */
+ status = consume_chunk(ctx);
+
+ /* the type of newline found is easy... */
+ *found = (ctx->state == READ_CRLF || ctx->state == READ_TERM)
+ ? SERF_NEWLINE_CRLF : SERF_NEWLINE_NONE;
+
+ return status;
+}
+
+static apr_status_t serf_headers_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ apr_size_t avail = requested;
+ int i;
+
+ *vecs_used = 0;
+
+ for (i = 0; i < vecs_size; i++) {
+ const char *data;
+ apr_size_t len;
+ apr_status_t status;
+
+        /* Calling read() would not be a safe optimization in general, but it
+         * is safe here for the header bucket, as it only frees all of the header
+ * keys and values when the entire bucket goes away - not on a
+ * per-read() basis as is normally the case.
+ */
+ status = serf_headers_read(bucket, avail, &data, &len);
+
+ if (len) {
+ vecs[*vecs_used].iov_base = (char*)data;
+ vecs[*vecs_used].iov_len = len;
+
+ (*vecs_used)++;
+
+ if (avail != SERF_READ_ALL_AVAIL) {
+ avail -= len;
+
+ /* If we reach 0, then read()'s status will suffice. */
+ if (avail == 0) {
+ return status;
+ }
+ }
+ }
+
+ if (status) {
+ return status;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+const serf_bucket_type_t serf_bucket_type_headers = {
+ "HEADERS",
+ serf_headers_read,
+ serf_headers_readline,
+ serf_headers_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_headers_peek,
+ serf_headers_destroy_and_data,
+};
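
A sketch of the headers API above; set/setc/setn differ only in which strings are copied into the bucket's allocator (setn: neither, set: value only, setc: both), and printf is only for illustration.

#include <stdio.h>

static int print_header(void *baton, const char *key, const char *value)
{
    printf("%s: %s\n", key, value);
    return 0;   /* non-zero would stop serf_bucket_headers_do() early */
}

static void headers_sketch(serf_bucket_alloc_t *allocator)
{
    serf_bucket_t *hdrs = serf_bucket_headers_create(allocator);

    serf_bucket_headers_setn(hdrs, "Host", "example.com");   /* no copies */
    serf_bucket_headers_setc(hdrs, "Accept", "text/html");   /* copy both */
    serf_bucket_headers_setn(hdrs, "Accept", "application/xml");

    /* Duplicate names are folded into one comma-separated value on get(). */
    printf("Accept: %s\n", serf_bucket_headers_get(hdrs, "Accept"));

    serf_bucket_headers_do(hdrs, print_header, NULL);
}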
diff --git a/buckets/iovec_buckets.c b/buckets/iovec_buckets.c
new file mode 100644
index 0000000..9ac1d8d
--- /dev/null
+++ b/buckets/iovec_buckets.c
@@ -0,0 +1,169 @@
+/* Copyright 2011 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ struct iovec *vecs;
+
+    /* Total number of buffers stored in the vecs var. */
+ int vecs_len;
+ /* Points to the first unread buffer. */
+ int current_vec;
+ /* First buffer offset. */
+ int offset;
+} iovec_context_t;
+
+serf_bucket_t *serf_bucket_iovec_create(
+ struct iovec vecs[],
+ int len,
+ serf_bucket_alloc_t *allocator)
+{
+ iovec_context_t *ctx;
+ int i;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->vecs = serf_bucket_mem_alloc(allocator, len * sizeof(struct iovec));
+ ctx->vecs_len = len;
+ ctx->current_vec = 0;
+ ctx->offset = 0;
+
+ /* copy all buffers to our iovec. */
+ for (i = 0; i < len; i++) {
+ ctx->vecs[i].iov_base = vecs[i].iov_base;
+ ctx->vecs[i].iov_len = vecs[i].iov_len;
+ }
+
+ return serf_bucket_create(&serf_bucket_type_iovec, allocator, ctx);
+}
+
+static apr_status_t serf_iovec_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ return APR_ENOTIMPL;
+}
+
+static apr_status_t serf_iovec_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+ iovec_context_t *ctx = bucket->data;
+
+ *vecs_used = 0;
+
+    /* copy the requested number of buffers into the provided iovec. */
+ for (; ctx->current_vec < ctx->vecs_len; ctx->current_vec++) {
+ struct iovec vec = ctx->vecs[ctx->current_vec];
+ apr_size_t remaining;
+
+ if (requested != SERF_READ_ALL_AVAIL && requested <= 0)
+ break;
+ if (*vecs_used >= vecs_size)
+ break;
+
+ vecs[*vecs_used].iov_base = (char*)vec.iov_base + ctx->offset;
+ remaining = vec.iov_len - ctx->offset;
+
+        /* Fewer bytes were requested than remain in the current buffer. */
+ if (requested != SERF_READ_ALL_AVAIL && requested < remaining) {
+ vecs[*vecs_used].iov_len = requested;
+ ctx->offset += requested;
+ requested = 0;
+ (*vecs_used)++;
+ break;
+ } else {
+ /* Copy the complete buffer. */
+ vecs[*vecs_used].iov_len = remaining;
+ ctx->offset = 0;
+ if (requested != SERF_READ_ALL_AVAIL)
+ requested -= remaining;
+ (*vecs_used)++;
+ }
+ }
+
+ if (ctx->current_vec == ctx->vecs_len && !ctx->offset)
+ return APR_EOF;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_iovec_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ struct iovec vec[1];
+ apr_status_t status;
+ int vecs_used;
+
+ status = serf_iovec_read_iovec(bucket, requested, 1, vec, &vecs_used);
+
+ if (vecs_used) {
+ *data = vec[0].iov_base;
+ *len = vec[0].iov_len;
+ } else {
+ *len = 0;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_iovec_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ iovec_context_t *ctx = bucket->data;
+
+ if (ctx->current_vec >= ctx->vecs_len) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+    /* Return the first unread buffer; don't bother combining all
+       remaining data. */
+ *data = ctx->vecs[ctx->current_vec].iov_base;
+ *len = ctx->vecs[ctx->current_vec].iov_len;
+
+ if (ctx->current_vec + 1 == ctx->vecs_len)
+ return APR_EOF;
+
+ return APR_SUCCESS;
+}
+
+static void serf_iovec_destroy(serf_bucket_t *bucket)
+{
+ iovec_context_t *ctx = bucket->data;
+
+ serf_bucket_mem_free(bucket->allocator, ctx->vecs);
+ serf_default_destroy_and_data(bucket);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_iovec = {
+ "IOVEC",
+ serf_iovec_read,
+ serf_iovec_readline,
+ serf_iovec_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_iovec_peek,
+ serf_iovec_destroy,
+};
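
Worth noting: the constructor above copies only the iovec array itself, not the buffers it points to, so those buffers must outlive the bucket. A small usage sketch follows, with drain_iovec_example as a hypothetical name and the allocator assumed to come from the surrounding connection.

    #include <apr_errno.h>

    #include "serf.h"

    /* Sketch: wrap two caller-owned fragments in an IOVEC bucket and
     * drain it.  Each read returns one fragment at a time. */
    static apr_status_t drain_iovec_example(serf_bucket_alloc_t *allocator)
    {
        struct iovec vecs[2];
        serf_bucket_t *bkt;
        apr_status_t status;

        vecs[0].iov_base = "Hello, ";
        vecs[0].iov_len = sizeof("Hello, ") - 1;
        vecs[1].iov_base = "world";
        vecs[1].iov_len = sizeof("world") - 1;

        bkt = serf_bucket_iovec_create(vecs, 2, allocator);

        do {
            const char *data;
            apr_size_t len;

            status = serf_bucket_read(bkt, SERF_READ_ALL_AVAIL, &data, &len);
            if (SERF_BUCKET_READ_ERROR(status))
                break;
            /* ... consume 'data' / 'len' here ... */
        } while (!APR_STATUS_IS_EOF(status));

        serf_bucket_destroy(bkt);
        return APR_STATUS_IS_EOF(status) ? APR_SUCCESS : status;
    }

Since the fragments here are string literals there is no lifetime issue; heap buffers would have to stay allocated until serf_bucket_destroy() runs.
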
diff --git a/buckets/limit_buckets.c b/buckets/limit_buckets.c
new file mode 100644
index 0000000..d2e6166
--- /dev/null
+++ b/buckets/limit_buckets.c
@@ -0,0 +1,134 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+/* Older versions of APR do not have this macro. */
+#ifdef APR_SIZE_MAX
+#define REQUESTED_MAX APR_SIZE_MAX
+#else
+#define REQUESTED_MAX (~((apr_size_t)0))
+#endif
+
+
+typedef struct {
+ serf_bucket_t *stream;
+ apr_uint64_t remaining;
+} limit_context_t;
+
+
+serf_bucket_t *serf_bucket_limit_create(
+ serf_bucket_t *stream, apr_uint64_t len, serf_bucket_alloc_t *allocator)
+{
+ limit_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->remaining = len;
+
+ return serf_bucket_create(&serf_bucket_type_limit, allocator, ctx);
+}
+
+static apr_status_t serf_limit_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ limit_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ if (!ctx->remaining) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+ if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
+ if (ctx->remaining <= REQUESTED_MAX) {
+ requested = (apr_size_t) ctx->remaining;
+ } else {
+ requested = REQUESTED_MAX;
+ }
+ }
+
+ status = serf_bucket_read(ctx->stream, requested, data, len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ ctx->remaining -= *len;
+ }
+
+ /* If we have met our limit and don't have a status, return EOF. */
+ if (!ctx->remaining && !status) {
+ status = APR_EOF;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_limit_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ limit_context_t *ctx = bucket->data;
+ apr_status_t status;
+
+ if (!ctx->remaining) {
+ *len = 0;
+ return APR_EOF;
+ }
+
+ status = serf_bucket_readline(ctx->stream, acceptable, found, data, len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ ctx->remaining -= *len;
+ }
+
+ /* If we have met our limit and don't have a status, return EOF. */
+ if (!ctx->remaining && !status) {
+ status = APR_EOF;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_limit_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ limit_context_t *ctx = bucket->data;
+
+ return serf_bucket_peek(ctx->stream, data, len);
+}
+
+static void serf_limit_destroy(serf_bucket_t *bucket)
+{
+ limit_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(ctx->stream);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+const serf_bucket_type_t serf_bucket_type_limit = {
+ "LIMIT",
+ serf_limit_read,
+ serf_limit_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_limit_peek,
+ serf_limit_destroy,
+};
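
The limit bucket is the piece the response bucket (later in this patch) uses to bound a Content-Length body. A minimal sketch, assuming the wrapped stream and allocator come from elsewhere; cap_body is a hypothetical name.

    #include "serf.h"

    /* Sketch: cap an existing stream at 'body_len' bytes.  Reads past the
     * limit return APR_EOF instead of consuming data that belongs to
     * whatever follows on the wrapped stream. */
    static serf_bucket_t *cap_body(serf_bucket_t *stream,
                                   apr_uint64_t body_len,
                                   serf_bucket_alloc_t *allocator)
    {
        return serf_bucket_limit_create(stream, body_len, allocator);
    }

Note that serf_limit_destroy() above destroys the wrapped stream as well, so wrapping hands over ownership.
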
diff --git a/buckets/mmap_buckets.c b/buckets/mmap_buckets.c
new file mode 100644
index 0000000..f55a76b
--- /dev/null
+++ b/buckets/mmap_buckets.c
@@ -0,0 +1,118 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_mmap.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ apr_mmap_t *mmap;
+ void *current;
+ apr_off_t offset;
+ apr_off_t remaining;
+} mmap_context_t;
+
+
+serf_bucket_t *serf_bucket_mmap_create(
+ apr_mmap_t *file_mmap,
+ serf_bucket_alloc_t *allocator)
+{
+ mmap_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->mmap = file_mmap;
+ ctx->current = NULL;
+ ctx->offset = 0;
+ ctx->remaining = ctx->mmap->size;
+
+ return serf_bucket_create(&serf_bucket_type_mmap, allocator, ctx);
+}
+
+static apr_status_t serf_mmap_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ mmap_context_t *ctx = bucket->data;
+
+ if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) {
+ *len = ctx->remaining;
+ }
+ else {
+ *len = requested;
+ }
+
+ /* ### Would it be faster to call this once and do the offset ourselves? */
+ apr_mmap_offset((void**)data, ctx->mmap, ctx->offset);
+
+ /* For the next read... */
+ ctx->offset += *len;
+ ctx->remaining -= *len;
+
+ if (ctx->remaining == 0) {
+ return APR_EOF;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_mmap_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ mmap_context_t *ctx = bucket->data;
+ const char *end;
+
+ /* ### Would it be faster to call this once and do the offset ourselves? */
+ apr_mmap_offset((void**)data, ctx->mmap, ctx->offset);
+ end = *data;
+
+    /* XXX An overflow is generated if we pass &ctx->remaining directly to
+     * readline, presumably because it is an apr_off_t rather than the
+     * apr_size_t readline expects; go through *len instead. */
+ *len = ctx->remaining;
+
+ serf_util_readline(&end, len, acceptable, found);
+
+ *len = end - *data;
+
+ ctx->offset += *len;
+ ctx->remaining -= *len;
+
+ if (ctx->remaining == 0) {
+ return APR_EOF;
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t serf_mmap_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ /* Oh, bah. */
+ return APR_ENOTIMPL;
+}
+
+const serf_bucket_type_t serf_bucket_type_mmap = {
+ "MMAP",
+ serf_mmap_read,
+ serf_mmap_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_mmap_peek,
+ serf_default_destroy_and_data,
+};
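
A sketch of feeding an open file into the MMAP bucket. It assumes APR was built with mmap support and that 'file' is an apr_file_t already opened for reading; file_to_mmap_bucket is an illustrative name, not part of serf.

    #include <apr_file_io.h>
    #include <apr_mmap.h>

    #include "serf.h"

    /* Sketch: map a whole file and wrap the mapping in an MMAP bucket. */
    static apr_status_t file_to_mmap_bucket(serf_bucket_t **out,
                                            apr_file_t *file,
                                            serf_bucket_alloc_t *allocator,
                                            apr_pool_t *pool)
    {
        apr_finfo_t finfo;
        apr_mmap_t *mapping;
        apr_status_t status;

        status = apr_file_info_get(&finfo, APR_FINFO_SIZE, file);
        if (status)
            return status;

        status = apr_mmap_create(&mapping, file, 0, (apr_size_t)finfo.size,
                                 APR_MMAP_READ, pool);
        if (status)
            return status;

        *out = serf_bucket_mmap_create(mapping, allocator);
        return APR_SUCCESS;
    }
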
diff --git a/buckets/request_buckets.c b/buckets/request_buckets.c
new file mode 100644
index 0000000..be010c0
--- /dev/null
+++ b/buckets/request_buckets.c
@@ -0,0 +1,228 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ const char *method;
+ const char *uri;
+ serf_bucket_t *headers;
+ serf_bucket_t *body;
+ apr_int64_t len;
+} request_context_t;
+
+#define LENGTH_UNKNOWN ((apr_int64_t)-1)
+
+
+serf_bucket_t *serf_bucket_request_create(
+ const char *method,
+ const char *URI,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator)
+{
+ request_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->method = method;
+ ctx->uri = URI;
+ ctx->headers = serf_bucket_headers_create(allocator);
+ ctx->body = body;
+ ctx->len = LENGTH_UNKNOWN;
+
+ return serf_bucket_create(&serf_bucket_type_request, allocator, ctx);
+}
+
+void serf_bucket_request_set_CL(
+ serf_bucket_t *bucket,
+ apr_int64_t len)
+{
+ request_context_t *ctx = (request_context_t *)bucket->data;
+
+ ctx->len = len;
+}
+
+serf_bucket_t *serf_bucket_request_get_headers(
+ serf_bucket_t *bucket)
+{
+ return ((request_context_t *)bucket->data)->headers;
+}
+
+void serf_bucket_request_set_root(
+ serf_bucket_t *bucket,
+ const char *root_url)
+{
+ request_context_t *ctx = (request_context_t *)bucket->data;
+
+ /* If uri is already absolute, don't change it. */
+ if (ctx->uri[0] != '/')
+ return;
+
+ /* If uri is '/' replace it with root_url. */
+ if (ctx->uri[1] == '\0')
+ ctx->uri = root_url;
+ else
+ ctx->uri =
+ apr_pstrcat(serf_bucket_allocator_get_pool(bucket->allocator),
+ root_url,
+ ctx->uri,
+ NULL);
+}
+
+static void serialize_data(serf_bucket_t *bucket)
+{
+ request_context_t *ctx = bucket->data;
+ serf_bucket_t *new_bucket;
+ const char *new_data;
+ struct iovec iov[4];
+ apr_size_t nbytes;
+
+ /* Serialize the request-line and headers into one mother string,
+ * and wrap a bucket around it.
+ */
+ iov[0].iov_base = (char*)ctx->method;
+ iov[0].iov_len = strlen(ctx->method);
+ iov[1].iov_base = " ";
+ iov[1].iov_len = sizeof(" ") - 1;
+ iov[2].iov_base = (char*)ctx->uri;
+ iov[2].iov_len = strlen(ctx->uri);
+ iov[3].iov_base = " HTTP/1.1\r\n";
+ iov[3].iov_len = sizeof(" HTTP/1.1\r\n") - 1;
+
+ /* ### pool allocation! */
+ new_data = apr_pstrcatv(serf_bucket_allocator_get_pool(bucket->allocator),
+ iov, 4, &nbytes);
+
+ /* Create a new bucket for this string. A free function isn't needed
+ * since the string is residing in a pool.
+ */
+ new_bucket = SERF_BUCKET_SIMPLE_STRING_LEN(new_data, nbytes,
+ bucket->allocator);
+
+ /* Build up the new bucket structure.
+ *
+ * Note that self needs to become an aggregate bucket so that a
+ * pointer to self still represents the "right" data.
+ */
+ serf_bucket_aggregate_become(bucket);
+
+ /* Insert the two buckets. */
+ serf_bucket_aggregate_append(bucket, new_bucket);
+ serf_bucket_aggregate_append(bucket, ctx->headers);
+
+ /* If we know the length, then use C-L and the raw body. Otherwise,
+ use chunked encoding for the request. */
+ if (ctx->len != LENGTH_UNKNOWN) {
+ char buf[30];
+ sprintf(buf, "%" APR_INT64_T_FMT, ctx->len);
+ serf_bucket_headers_set(ctx->headers, "Content-Length", buf);
+ if (ctx->body != NULL)
+ serf_bucket_aggregate_append(bucket, ctx->body);
+ }
+ else if (ctx->body != NULL) {
+ /* Morph the body bucket to a chunked encoding bucket for now. */
+ serf_bucket_headers_setn(ctx->headers, "Transfer-Encoding", "chunked");
+ ctx->body = serf_bucket_chunk_create(ctx->body, bucket->allocator);
+ serf_bucket_aggregate_append(bucket, ctx->body);
+ }
+
+ /* Our private context is no longer needed, and is not referred to by
+ * any existing bucket. Toss it.
+ */
+ serf_bucket_mem_free(bucket->allocator, ctx);
+}
+
+static apr_status_t serf_request_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read(bucket, requested, data, len);
+}
+
+static apr_status_t serf_request_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the readline. */
+ return serf_bucket_readline(bucket, acceptable, found, data, len);
+}
+
+static apr_status_t serf_request_read_iovec(serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the read. */
+ return serf_bucket_read_iovec(bucket, requested,
+ vecs_size, vecs, vecs_used);
+}
+
+static apr_status_t serf_request_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+    /* Serialize our private data into a new aggregate bucket. */
+ serialize_data(bucket);
+
+ /* Delegate to the "new" aggregate bucket to do the peek. */
+ return serf_bucket_peek(bucket, data, len);
+}
+
+void serf_bucket_request_become(
+ serf_bucket_t *bucket,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body)
+{
+ request_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(bucket->allocator, sizeof(*ctx));
+ ctx->method = method;
+ ctx->uri = uri;
+ ctx->headers = serf_bucket_headers_create(bucket->allocator);
+    ctx->body = body;
+    ctx->len = LENGTH_UNKNOWN;  /* serialize_data() reads this later */
+
+ bucket->type = &serf_bucket_type_request;
+ bucket->data = ctx;
+
+ /* The allocator remains the same. */
+}
+
+const serf_bucket_type_t serf_bucket_type_request = {
+ "REQUEST",
+ serf_request_read,
+ serf_request_readline,
+ serf_request_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_request_peek,
+ serf_default_destroy_and_data,
+};
+
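
A usage sketch tying the request bucket to the headers and simple buckets from this same patch. build_post is a hypothetical helper; the allocator is assumed to come from the connection, and the body string must stay alive for the lifetime of the request.

    #include <string.h>

    #include "serf.h"

    /* Sketch: build a POST request with a fixed body and a known
     * Content-Length, so serialize_data() emits C-L instead of
     * Transfer-Encoding: chunked. */
    static serf_bucket_t *build_post(const char *uri,
                                     const char *body_data,
                                     serf_bucket_alloc_t *allocator)
    {
        serf_bucket_t *body, *req, *hdrs;
        apr_size_t body_len = strlen(body_data);

        /* Borrowing form: no freefunc, the caller keeps body_data alive. */
        body = serf_bucket_simple_create(body_data, body_len,
                                         NULL, NULL, allocator);

        req = serf_bucket_request_create("POST", uri, body, allocator);
        serf_bucket_request_set_CL(req, (apr_int64_t)body_len);

        hdrs = serf_bucket_request_get_headers(req);
        serf_bucket_headers_setn(hdrs, "Content-Type", "text/plain");

        return req;
    }

Without the set_CL() call, serialize_data() above would wrap the body in a chunk bucket and send it chunked instead.
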
diff --git a/buckets/response_buckets.c b/buckets/response_buckets.c
new file mode 100644
index 0000000..975fedc
--- /dev/null
+++ b/buckets/response_buckets.c
@@ -0,0 +1,429 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_lib.h>
+#include <apr_strings.h>
+#include <apr_date.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ serf_bucket_t *stream;
+ serf_bucket_t *body; /* Pointer to the stream wrapping the body. */
+ serf_bucket_t *headers; /* holds parsed headers */
+
+ enum {
+ STATE_STATUS_LINE, /* reading status line */
+ STATE_HEADERS, /* reading headers */
+ STATE_BODY, /* reading body */
+ STATE_TRAILERS, /* reading trailers */
+ STATE_DONE /* we've sent EOF */
+ } state;
+
+ /* Buffer for accumulating a line from the response. */
+ serf_linebuf_t linebuf;
+
+ serf_status_line sl;
+
+ int chunked; /* Do we need to read trailers? */
+ int head_req; /* Was this a HEAD request? */
+} response_context_t;
+
+
+serf_bucket_t *serf_bucket_response_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator)
+{
+ response_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->stream = stream;
+ ctx->body = NULL;
+ ctx->headers = serf_bucket_headers_create(allocator);
+ ctx->state = STATE_STATUS_LINE;
+ ctx->chunked = 0;
+ ctx->head_req = 0;
+
+ serf_linebuf_init(&ctx->linebuf);
+
+ return serf_bucket_create(&serf_bucket_type_response, allocator, ctx);
+}
+
+void serf_bucket_response_set_head(
+ serf_bucket_t *bucket)
+{
+ response_context_t *ctx = bucket->data;
+
+ ctx->head_req = 1;
+}
+
+serf_bucket_t *serf_bucket_response_get_headers(
+ serf_bucket_t *bucket)
+{
+ return ((response_context_t *)bucket->data)->headers;
+}
+
+
+static void serf_response_destroy_and_data(serf_bucket_t *bucket)
+{
+ response_context_t *ctx = bucket->data;
+
+ if (ctx->state != STATE_STATUS_LINE) {
+ serf_bucket_mem_free(bucket->allocator, (void*)ctx->sl.reason);
+ }
+
+ serf_bucket_destroy(ctx->stream);
+ if (ctx->body != NULL)
+ serf_bucket_destroy(ctx->body);
+ serf_bucket_destroy(ctx->headers);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static apr_status_t fetch_line(response_context_t *ctx, int acceptable)
+{
+ return serf_linebuf_fetch(&ctx->linebuf, ctx->stream, acceptable);
+}
+
+static apr_status_t parse_status_line(response_context_t *ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ int res;
+ char *reason; /* ### stupid APR interface makes this non-const */
+
+ /* ctx->linebuf.line should be of form: HTTP/1.1 200 OK */
+ res = apr_date_checkmask(ctx->linebuf.line, "HTTP/#.# ###*");
+ if (!res) {
+ /* Not an HTTP response? Well, at least we won't understand it. */
+ return SERF_ERROR_BAD_HTTP_RESPONSE;
+ }
+
+ ctx->sl.version = SERF_HTTP_VERSION(ctx->linebuf.line[5] - '0',
+ ctx->linebuf.line[7] - '0');
+ ctx->sl.code = apr_strtoi64(ctx->linebuf.line + 8, &reason, 10);
+
+ /* Skip leading spaces for the reason string. */
+ if (apr_isspace(*reason)) {
+ reason++;
+ }
+
+ /* Copy the reason value out of the line buffer. */
+ ctx->sl.reason = serf_bstrmemdup(allocator, reason,
+ ctx->linebuf.used
+ - (reason - ctx->linebuf.line));
+
+ return APR_SUCCESS;
+}
+
+/* This code should be replaced with header buckets. */
+static apr_status_t fetch_headers(serf_bucket_t *bkt, response_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ /* Something was read. Process it. */
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) {
+ const char *end_key;
+ const char *c;
+
+ end_key = c = memchr(ctx->linebuf.line, ':', ctx->linebuf.used);
+ if (!c) {
+ /* Bad headers? */
+ return SERF_ERROR_BAD_HTTP_RESPONSE;
+ }
+
+ /* Skip over initial ':' */
+ c++;
+
+ /* And skip all whitespaces. */
+ for(; c < ctx->linebuf.line + ctx->linebuf.used; c++)
+ {
+ if (!apr_isspace(*c))
+ {
+ break;
+ }
+ }
+
+ /* Always copy the headers (from the linebuf into new mem). */
+ /* ### we should be able to optimize some mem copies */
+ serf_bucket_headers_setx(
+ ctx->headers,
+ ctx->linebuf.line, end_key - ctx->linebuf.line, 1,
+ c, ctx->linebuf.line + ctx->linebuf.used - c, 1);
+ }
+
+ return status;
+}
+
+/* Perform one iteration of the state machine.
+ *
+ * Will return when one of the following conditions occurs:
+ * 1) a state change
+ * 2) an error
+ * 3) the stream is not ready or at EOF
+ * 4) APR_SUCCESS, meaning the machine can be run again immediately
+ */
+static apr_status_t run_machine(serf_bucket_t *bkt, response_context_t *ctx)
+{
+ apr_status_t status = APR_SUCCESS; /* initialize to avoid gcc warnings */
+
+ switch (ctx->state) {
+ case STATE_STATUS_LINE:
+ /* RFC 2616 says that CRLF is the only line ending, but we can easily
+ * accept any kind of line ending.
+ */
+ status = fetch_line(ctx, SERF_NEWLINE_ANY);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (ctx->linebuf.state == SERF_LINEBUF_READY) {
+ /* The Status-Line is in the line buffer. Process it. */
+ status = parse_status_line(ctx, bkt->allocator);
+ if (status)
+ return status;
+
+ /* Good times ahead: we're switching protocols! */
+ if (ctx->sl.code == 101) {
+ ctx->body =
+ serf_bucket_barrier_create(ctx->stream, bkt->allocator);
+ ctx->state = STATE_DONE;
+ break;
+ }
+
+ /* Okay... move on to reading the headers. */
+ ctx->state = STATE_HEADERS;
+ }
+ else {
+ /* The connection closed before we could get the next
+ * response. Treat the request as lost so that our upper
+ * end knows the server never tried to give us a response.
+ */
+ if (APR_STATUS_IS_EOF(status)) {
+ return SERF_ERROR_REQUEST_LOST;
+ }
+ }
+ break;
+ case STATE_HEADERS:
+ status = fetch_headers(bkt, ctx);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* If an empty line was read, then we hit the end of the headers.
+ * Move on to the body.
+ */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
+ const void *v;
+
+ /* Advance the state. */
+ ctx->state = STATE_BODY;
+
+ ctx->body =
+ serf_bucket_barrier_create(ctx->stream, bkt->allocator);
+
+ /* Are we C-L, chunked, or conn close? */
+ v = serf_bucket_headers_get(ctx->headers, "Content-Length");
+ if (v) {
+ apr_uint64_t length;
+ length = apr_strtoi64(v, NULL, 10);
+ if (errno == ERANGE) {
+ return APR_FROM_OS_ERROR(ERANGE);
+ }
+ ctx->body = serf_bucket_limit_create(ctx->body, length,
+ bkt->allocator);
+ }
+ else {
+ v = serf_bucket_headers_get(ctx->headers, "Transfer-Encoding");
+
+ /* Need to handle multiple transfer-encoding. */
+ if (v && strcasecmp("chunked", v) == 0) {
+ ctx->chunked = 1;
+ ctx->body = serf_bucket_dechunk_create(ctx->body,
+ bkt->allocator);
+ }
+
+ if (!v && (ctx->sl.code == 204 || ctx->sl.code == 304)) {
+ ctx->state = STATE_DONE;
+ }
+ }
+ v = serf_bucket_headers_get(ctx->headers, "Content-Encoding");
+ if (v) {
+ /* Need to handle multiple content-encoding. */
+ if (v && strcasecmp("gzip", v) == 0) {
+ ctx->body =
+ serf_bucket_deflate_create(ctx->body, bkt->allocator,
+ SERF_DEFLATE_GZIP);
+ }
+ else if (v && strcasecmp("deflate", v) == 0) {
+ ctx->body =
+ serf_bucket_deflate_create(ctx->body, bkt->allocator,
+ SERF_DEFLATE_DEFLATE);
+ }
+ }
+ /* If we're a HEAD request, we don't receive a body. */
+ if (ctx->head_req) {
+ ctx->state = STATE_DONE;
+ }
+ }
+ break;
+ case STATE_BODY:
+ /* Don't do anything. */
+ break;
+ case STATE_TRAILERS:
+ status = fetch_headers(bkt, ctx);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* If an empty line was read, then we're done. */
+ if (ctx->linebuf.state == SERF_LINEBUF_READY && !ctx->linebuf.used) {
+ ctx->state = STATE_DONE;
+ return APR_EOF;
+ }
+ break;
+ case STATE_DONE:
+ return APR_EOF;
+ default:
+ /* Not reachable */
+ return APR_EGENERAL;
+ }
+
+ return status;
+}
+
+static apr_status_t wait_for_body(serf_bucket_t *bkt, response_context_t *ctx)
+{
+ apr_status_t status;
+
+ /* Keep reading and moving through states if we aren't at the BODY */
+ while (ctx->state != STATE_BODY) {
+ status = run_machine(bkt, ctx);
+
+ /* Anything other than APR_SUCCESS means that we cannot immediately
+ * read again (for now).
+ */
+ if (status)
+ return status;
+ }
+ /* in STATE_BODY */
+
+ return APR_SUCCESS;
+}
+
+apr_status_t serf_bucket_response_wait_for_headers(
+ serf_bucket_t *bucket)
+{
+ response_context_t *ctx = bucket->data;
+
+ return wait_for_body(bucket, ctx);
+}
+
+apr_status_t serf_bucket_response_status(
+ serf_bucket_t *bkt,
+ serf_status_line *sline)
+{
+ response_context_t *ctx = bkt->data;
+ apr_status_t status;
+
+ if (ctx->state != STATE_STATUS_LINE) {
+ /* We already read it and moved on. Just return it. */
+ *sline = ctx->sl;
+ return APR_SUCCESS;
+ }
+
+    /* Running the state machine once will either advance the machine or
+     * report that the stream doesn't yet have enough data. There is never a
+     * need to run the machine more than once to try to satisfy this. We
+ * have to look at the state to tell whether it advanced, though, as
+ * it is quite possible to advance *and* to return APR_EAGAIN.
+ */
+ status = run_machine(bkt, ctx);
+ if (ctx->state == STATE_HEADERS) {
+ *sline = ctx->sl;
+ }
+ else {
+ /* Indicate that we don't have the information yet. */
+ sline->version = 0;
+ }
+
+ return status;
+}
+
+static apr_status_t serf_response_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ response_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ /* It's not possible to have read anything yet! */
+ if (APR_STATUS_IS_EOF(rv) || APR_STATUS_IS_EAGAIN(rv)) {
+ *len = 0;
+ }
+ return rv;
+ }
+
+ rv = serf_bucket_read(ctx->body, requested, data, len);
+ if (APR_STATUS_IS_EOF(rv)) {
+ if (ctx->chunked) {
+ ctx->state = STATE_TRAILERS;
+ /* Mask the result. */
+ rv = APR_SUCCESS;
+ }
+ else {
+ ctx->state = STATE_DONE;
+ }
+ }
+ return rv;
+}
+
+static apr_status_t serf_response_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ response_context_t *ctx = bucket->data;
+ apr_status_t rv;
+
+ rv = wait_for_body(bucket, ctx);
+ if (rv) {
+ return rv;
+ }
+
+ /* Delegate to the stream bucket to do the readline. */
+ return serf_bucket_readline(ctx->body, acceptable, found, data, len);
+}
+
+/* ### need to implement */
+#define serf_response_peek NULL
+
+const serf_bucket_type_t serf_bucket_type_response = {
+ "RESPONSE",
+ serf_response_read,
+ serf_response_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_response_peek,
+ serf_response_destroy_and_data,
+};
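
A handler-style consumer of the response bucket might look like the sketch below, run once per readable event on the connection; handle_response is a hypothetical name and the surrounding event loop is assumed.

    #include <apr_errno.h>

    #include "serf.h"

    /* Sketch: drain whatever body data is currently decodable.  read()
     * drives the status-line and header parsing internally and only
     * hands back body data once STATE_BODY is reached. */
    static apr_status_t handle_response(serf_bucket_t *response)
    {
        apr_status_t status;

        while (1) {
            const char *data;
            apr_size_t len;

            status = serf_bucket_read(response, SERF_READ_ALL_AVAIL,
                                      &data, &len);
            if (SERF_BUCKET_READ_ERROR(status))
                return status;

            if (len) {
                /* ... consume 'data' / 'len' ... */
            }

            if (APR_STATUS_IS_EOF(status)) {
                serf_status_line sl;

                /* The status line and headers are fully parsed by now. */
                serf_bucket_response_status(response, &sl);
                return APR_EOF;
            }
            if (APR_STATUS_IS_EAGAIN(status))
                return status;      /* wait for more network data */
        }
    }
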
diff --git a/buckets/simple_buckets.c b/buckets/simple_buckets.c
new file mode 100644
index 0000000..f36239b
--- /dev/null
+++ b/buckets/simple_buckets.c
@@ -0,0 +1,142 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ const char *original;
+ const char *current;
+ apr_size_t remaining;
+
+ serf_simple_freefunc_t freefunc;
+ void *baton;
+
+} simple_context_t;
+
+
+static void free_copied_data(void *baton, const char *data)
+{
+ serf_bucket_mem_free(baton, (char*)data);
+}
+
+serf_bucket_t *serf_bucket_simple_create(
+ const char *data,
+ apr_size_t len,
+ serf_simple_freefunc_t freefunc,
+ void *freefunc_baton,
+ serf_bucket_alloc_t *allocator)
+{
+ simple_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->original = ctx->current = data;
+ ctx->remaining = len;
+ ctx->freefunc = freefunc;
+ ctx->baton = freefunc_baton;
+
+ return serf_bucket_create(&serf_bucket_type_simple, allocator, ctx);
+}
+
+serf_bucket_t *serf_bucket_simple_copy_create(
+ const char *data, apr_size_t len,
+ serf_bucket_alloc_t *allocator)
+{
+ simple_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+
+ ctx->original = ctx->current = serf_bucket_mem_alloc(allocator, len);
+ memcpy((char*)ctx->original, data, len);
+
+ ctx->remaining = len;
+ ctx->freefunc = free_copied_data;
+ ctx->baton = allocator;
+
+ return serf_bucket_create(&serf_bucket_type_simple, allocator, ctx);
+}
+
+static apr_status_t serf_simple_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ simple_context_t *ctx = bucket->data;
+
+ if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining)
+ requested = ctx->remaining;
+
+ *data = ctx->current;
+ *len = requested;
+
+ ctx->current += requested;
+ ctx->remaining -= requested;
+
+ return ctx->remaining ? APR_SUCCESS : APR_EOF;
+}
+
+static apr_status_t serf_simple_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ simple_context_t *ctx = bucket->data;
+
+ /* Returned data will be from current position. */
+ *data = ctx->current;
+ serf_util_readline(&ctx->current, &ctx->remaining, acceptable, found);
+
+ /* See how much ctx->current moved forward. */
+ *len = ctx->current - *data;
+
+ return ctx->remaining ? APR_SUCCESS : APR_EOF;
+}
+
+static apr_status_t serf_simple_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ simple_context_t *ctx = bucket->data;
+
+ /* return whatever we have left */
+ *data = ctx->current;
+ *len = ctx->remaining;
+
+ /* we returned everything this bucket will ever hold */
+ return APR_EOF;
+}
+
+static void serf_simple_destroy(serf_bucket_t *bucket)
+{
+ simple_context_t *ctx = bucket->data;
+
+ if (ctx->freefunc)
+ (*ctx->freefunc)(ctx->baton, ctx->original);
+
+ serf_default_destroy_and_data(bucket);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_simple = {
+ "SIMPLE",
+ serf_simple_read,
+ serf_simple_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_simple_peek,
+ serf_simple_destroy,
+};
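
A short sketch contrasting the two construction paths above; simple_examples is a hypothetical name and the allocator is assumed to come from elsewhere.

    #include "serf.h"

    /* Sketch of the SIMPLE bucket's two ownership modes. */
    static void simple_examples(serf_bucket_alloc_t *allocator)
    {
        serf_bucket_t *borrowed, *copied;

        /* Borrow: no freefunc, so the caller must keep the data alive
         * at least as long as the bucket. */
        borrowed = serf_bucket_simple_create("static data",
                                             sizeof("static data") - 1,
                                             NULL, NULL, allocator);

        /* Copy: the bucket duplicates the bytes up front and frees its
         * private copy in serf_simple_destroy(). */
        copied = serf_bucket_simple_copy_create("transient data",
                                                sizeof("transient data") - 1,
                                                allocator);

        serf_bucket_destroy(borrowed);
        serf_bucket_destroy(copied);
    }

The SERF_BUCKET_SIMPLE_STRING_LEN() macro used by the request bucket earlier in this patch is essentially shorthand for the borrowing form.
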
diff --git a/buckets/socket_buckets.c b/buckets/socket_buckets.c
new file mode 100644
index 0000000..dd2469a
--- /dev/null
+++ b/buckets/socket_buckets.c
@@ -0,0 +1,114 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_network_io.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+
+typedef struct {
+ apr_socket_t *skt;
+
+ serf_databuf_t databuf;
+
+ /* Progress callback */
+ serf_progress_t progress_func;
+ void *progress_baton;
+} socket_context_t;
+
+
+static apr_status_t socket_reader(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ socket_context_t *ctx = baton;
+ apr_status_t status;
+
+ *len = bufsize;
+ status = apr_socket_recv(ctx->skt, buf, len);
+
+ if (ctx->progress_func)
+ ctx->progress_func(ctx->progress_baton, *len, 0);
+
+ return status;
+}
+
+serf_bucket_t *serf_bucket_socket_create(
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator)
+{
+ socket_context_t *ctx;
+
+ /* Oh, well. */
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ ctx->skt = skt;
+
+ serf_databuf_init(&ctx->databuf);
+ ctx->databuf.read = socket_reader;
+ ctx->databuf.read_baton = ctx;
+
+    ctx->progress_func = NULL;
+    ctx->progress_baton = NULL;
+ return serf_bucket_create(&serf_bucket_type_socket, allocator, ctx);
+}
+
+void serf_bucket_socket_set_read_progress_cb(
+ serf_bucket_t *bucket,
+ const serf_progress_t progress_func,
+ void *progress_baton)
+{
+ socket_context_t *ctx = bucket->data;
+
+ ctx->progress_func = progress_func;
+ ctx->progress_baton = progress_baton;
+}
+
+static apr_status_t serf_socket_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ socket_context_t *ctx = bucket->data;
+
+ return serf_databuf_read(&ctx->databuf, requested, data, len);
+}
+
+static apr_status_t serf_socket_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data, apr_size_t *len)
+{
+ socket_context_t *ctx = bucket->data;
+
+ return serf_databuf_readline(&ctx->databuf, acceptable, found, data, len);
+}
+
+static apr_status_t serf_socket_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ socket_context_t *ctx = bucket->data;
+
+ return serf_databuf_peek(&ctx->databuf, data, len);
+}
+
+const serf_bucket_type_t serf_bucket_type_socket = {
+ "SOCKET",
+ serf_socket_read,
+ serf_socket_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_socket_peek,
+ serf_default_destroy_and_data,
+};
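
A sketch wrapping an already-connected socket and installing the read-progress callback. report_progress and wrap_socket are hypothetical names; the callback signature is assumed to follow serf_progress_t (baton, bytes read, bytes written), consistent with the (*len, 0) call above.

    #include <apr_network_io.h>

    #include "serf.h"

    /* Hypothetical progress callback: accumulate the bytes read off the
     * wire (the socket bucket always reports 0 for the write count). */
    static void report_progress(void *baton, apr_off_t bytes_read,
                                apr_off_t bytes_written)
    {
        apr_off_t *total = baton;

        *total += bytes_read;
    }

    /* Sketch: wrap a connected, non-blocking socket in a SOCKET bucket
     * and track how much has been received. */
    static serf_bucket_t *wrap_socket(apr_socket_t *skt,
                                      apr_off_t *total_read,
                                      serf_bucket_alloc_t *allocator)
    {
        serf_bucket_t *sock_bkt = serf_bucket_socket_create(skt, allocator);

        serf_bucket_socket_set_read_progress_cb(sock_bkt, report_progress,
                                                total_read);
        return sock_bkt;
    }
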
diff --git a/buckets/ssl_buckets.c b/buckets/ssl_buckets.c
new file mode 100644
index 0000000..3f43543
--- /dev/null
+++ b/buckets/ssl_buckets.c
@@ -0,0 +1,1629 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ----
+ *
+ * For the OpenSSL thread-safety locking code:
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Originally developed by Aaron Bannert and Justin Erenkrantz, eBuilt.
+ */
+
+#include <apr_pools.h>
+#include <apr_network_io.h>
+#include <apr_portable.h>
+#include <apr_strings.h>
+#include <apr_base64.h>
+#include <apr_version.h>
+#include <apr_atomic.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#include <openssl/bio.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/pkcs12.h>
+#include <openssl/x509v3.h>
+
+#ifndef APR_VERSION_AT_LEAST /* Introduced in APR 1.3.0 */
+#define APR_VERSION_AT_LEAST(major,minor,patch) \
+ (((major) < APR_MAJOR_VERSION) \
+ || ((major) == APR_MAJOR_VERSION && (minor) < APR_MINOR_VERSION) \
+ || ((major) == APR_MAJOR_VERSION && (minor) == APR_MINOR_VERSION && \
+ (patch) <= APR_PATCH_VERSION))
+#endif /* APR_VERSION_AT_LEAST */
+
+#ifndef APR_ARRAY_PUSH
+#define APR_ARRAY_PUSH(ary,type) (*((type *)apr_array_push(ary)))
+#endif
+
+
+/*#define SSL_VERBOSE*/
+
+/*
+ * Here's an overview of the SSL bucket's relationship to OpenSSL and serf.
+ *
+ * HTTP request: SSLENCRYPT(REQUEST)
+ * [context.c reads from SSLENCRYPT and writes out to the socket]
+ * HTTP response: RESPONSE(SSLDECRYPT(SOCKET))
+ * [handler function reads from RESPONSE which in turn reads from SSLDECRYPT]
+ *
+ * HTTP request read call path:
+ *
+ * write_to_connection
+ * |- serf_bucket_read on SSLENCRYPT
+ * |- serf_ssl_read
+ * |- serf_databuf_read
+ * |- common_databuf_prep
+ * |- ssl_encrypt
+ * |- 1. Try to read pending encrypted data; If available, return.
+ * |- 2. Try to read from ctx->stream [REQUEST bucket]
+ * |- 3. Call SSL_write with read data
+ * |- ...
+ * |- bio_bucket_read can be called
+ * |- bio_bucket_write with encrypted data
+ * |- store in sink
+ * |- 4. If successful, read pending encrypted data and return.
+ * |- 5. If fails, place read data back in ctx->stream
+ *
+ * HTTP response read call path:
+ *
+ * read_from_connection
+ * |- acceptor
+ * |- handler
+ * |- ...
+ * |- serf_bucket_read(SSLDECRYPT)
+ * |- serf_ssl_read
+ * |- serf_databuf_read
+ * |- ssl_decrypt
+ * |- 1. SSL_read() for pending decrypted data; if any, return.
+ * |- 2. Try to read from ctx->stream [SOCKET bucket]
+ * |- 3. Append data to ssl_ctx->source
+ * |- 4. Call SSL_read()
+ * |- ...
+ * |- bio_bucket_write can be called
+ * |- bio_bucket_read
+ * |- read data from ssl_ctx->source
+ * |- If data read, return it.
+ * |- If an error, set the STATUS value and return.
+ *
+ */
+
+typedef struct bucket_list {
+ serf_bucket_t *bucket;
+ struct bucket_list *next;
+} bucket_list_t;
+
+typedef struct {
+ /* Helper to read data. Wraps stream. */
+ serf_databuf_t databuf;
+
+ /* Our source for more data. */
+ serf_bucket_t *stream;
+
+ /* The next set of buckets */
+ bucket_list_t *stream_next;
+
+ /* The status of the last thing we read. */
+ apr_status_t status;
+ apr_status_t exhausted;
+ int exhausted_reset;
+
+ /* Data we've read but not processed. */
+ serf_bucket_t *pending;
+} serf_ssl_stream_t;
+
+struct serf_ssl_context_t {
+ /* How many open buckets refer to this context. */
+ int refcount;
+
+ /* The pool that this context uses. */
+ apr_pool_t *pool;
+
+ /* The allocator associated with the above pool. */
+ serf_bucket_alloc_t *allocator;
+
+ /* Internal OpenSSL parameters */
+ SSL_CTX *ctx;
+ SSL *ssl;
+ BIO *bio;
+
+ serf_ssl_stream_t encrypt;
+ serf_ssl_stream_t decrypt;
+
+ /* Client cert callbacks */
+ serf_ssl_need_client_cert_t cert_callback;
+ void *cert_userdata;
+ apr_pool_t *cert_cache_pool;
+ const char *cert_file_success;
+
+ /* Client cert PW callbacks */
+ serf_ssl_need_cert_password_t cert_pw_callback;
+ void *cert_pw_userdata;
+ apr_pool_t *cert_pw_cache_pool;
+ const char *cert_pw_success;
+
+ /* Server cert callbacks */
+ serf_ssl_need_server_cert_t server_cert_callback;
+ serf_ssl_server_cert_chain_cb_t server_cert_chain_callback;
+ void *server_cert_userdata;
+
+ const char *cert_path;
+
+ X509 *cached_cert;
+ EVP_PKEY *cached_cert_pw;
+
+ apr_status_t pending_err;
+};
+
+typedef struct {
+ /* The bucket-independent ssl context that this bucket is associated with */
+ serf_ssl_context_t *ssl_ctx;
+
+ /* Pointer to the 'right' databuf. */
+ serf_databuf_t *databuf;
+
+ /* Pointer to our stream, so we can find it later. */
+ serf_bucket_t **our_stream;
+} ssl_context_t;
+
+struct serf_ssl_certificate_t {
+ X509 *ssl_cert;
+ int depth;
+};
+
+/* Returns the amount read. */
+static int bio_bucket_read(BIO *bio, char *in, int inlen)
+{
+ serf_ssl_context_t *ctx = bio->ptr;
+ const char *data;
+ apr_status_t status;
+ apr_size_t len;
+
+#ifdef SSL_VERBOSE
+ printf("bio_bucket_read called for %d bytes\n", inlen);
+#endif
+
+ if (ctx->encrypt.status == SERF_ERROR_WAIT_CONN
+ && BIO_should_read(ctx->bio)) {
+#ifdef SSL_VERBOSE
+ printf("bio_bucket_read waiting: (%d %d %d)\n",
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ /* Falling back... */
+ ctx->encrypt.exhausted_reset = 1;
+ BIO_clear_retry_flags(bio);
+ }
+
+ status = serf_bucket_read(ctx->decrypt.pending, inlen, &data, &len);
+
+ ctx->decrypt.status = status;
+#ifdef SSL_VERBOSE
+ printf("bio_bucket_read received %d bytes (%d)\n", len, status);
+#endif
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ /* Oh suck. */
+ if (len) {
+ memcpy(in, data, len);
+ return len;
+ }
+ if (APR_STATUS_IS_EOF(status)) {
+ BIO_set_retry_read(bio);
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
+/* Returns the amount written. */
+static int bio_bucket_write(BIO *bio, const char *in, int inl)
+{
+ serf_ssl_context_t *ctx = bio->ptr;
+ serf_bucket_t *tmp;
+
+#ifdef SSL_VERBOSE
+ printf("bio_bucket_write called for %d bytes\n", inl);
+#endif
+ if (ctx->encrypt.status == SERF_ERROR_WAIT_CONN
+ && !BIO_should_read(ctx->bio)) {
+#ifdef SSL_VERBOSE
+ printf("bio_bucket_write waiting: (%d %d %d)\n",
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ /* Falling back... */
+ ctx->encrypt.exhausted_reset = 1;
+ BIO_clear_retry_flags(bio);
+ }
+
+ tmp = serf_bucket_simple_copy_create(in, inl,
+ ctx->encrypt.pending->allocator);
+
+ serf_bucket_aggregate_append(ctx->encrypt.pending, tmp);
+
+ return inl;
+}
+
+/* Returns the amount read. */
+static int bio_file_read(BIO *bio, char *in, int inlen)
+{
+ apr_file_t *file = bio->ptr;
+ apr_status_t status;
+ apr_size_t len;
+
+ BIO_clear_retry_flags(bio);
+
+ len = inlen;
+ status = apr_file_read(file, in, &len);
+
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ /* Oh suck. */
+ if (APR_STATUS_IS_EOF(status)) {
+ BIO_set_retry_read(bio);
+ return -1;
+ } else {
+ return len;
+ }
+ }
+
+ return -1;
+}
+
+/* Returns the amount written. */
+static int bio_file_write(BIO *bio, const char *in, int inl)
+{
+ apr_file_t *file = bio->ptr;
+ apr_size_t nbytes;
+
+ BIO_clear_retry_flags(bio);
+
+ nbytes = inl;
+ apr_file_write(file, in, &nbytes);
+
+ return nbytes;
+}
+
+static int bio_file_gets(BIO *bio, char *in, int inlen)
+{
+ return bio_file_read(bio, in, inlen);
+}
+
+static int bio_bucket_create(BIO *bio)
+{
+ bio->shutdown = 1;
+ bio->init = 1;
+ bio->num = -1;
+ bio->ptr = NULL;
+
+ return 1;
+}
+
+static int bio_bucket_destroy(BIO *bio)
+{
+ /* Did we already free this? */
+ if (bio == NULL) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static long bio_bucket_ctrl(BIO *bio, int cmd, long num, void *ptr)
+{
+ long ret = 1;
+
+ switch (cmd) {
+ default:
+ /* abort(); */
+ break;
+ case BIO_CTRL_FLUSH:
+ /* At this point we can't force a flush. */
+ break;
+ case BIO_CTRL_PUSH:
+ case BIO_CTRL_POP:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+static BIO_METHOD bio_bucket_method = {
+ BIO_TYPE_MEM,
+ "Serf SSL encryption and decryption buckets",
+ bio_bucket_write,
+ bio_bucket_read,
+ NULL, /* Is this called? */
+ NULL, /* Is this called? */
+ bio_bucket_ctrl,
+ bio_bucket_create,
+ bio_bucket_destroy,
+#ifdef OPENSSL_VERSION_NUMBER
+ NULL /* sslc does not have the callback_ctrl field */
+#endif
+};
+
+static BIO_METHOD bio_file_method = {
+ BIO_TYPE_FILE,
+ "Wrapper around APR file structures",
+ bio_file_write,
+ bio_file_read,
+ NULL, /* Is this called? */
+ bio_file_gets, /* Is this called? */
+ bio_bucket_ctrl,
+ bio_bucket_create,
+ bio_bucket_destroy,
+#ifdef OPENSSL_VERSION_NUMBER
+ NULL /* sslc does not have the callback_ctrl field */
+#endif
+};
+
+static int
+validate_server_certificate(int cert_valid, X509_STORE_CTX *store_ctx)
+{
+ SSL *ssl;
+ serf_ssl_context_t *ctx;
+ X509 *server_cert;
+ int err, depth;
+ int failures = 0;
+
+ ssl = X509_STORE_CTX_get_ex_data(store_ctx,
+ SSL_get_ex_data_X509_STORE_CTX_idx());
+ ctx = SSL_get_app_data(ssl);
+
+ server_cert = X509_STORE_CTX_get_current_cert(store_ctx);
+ depth = X509_STORE_CTX_get_error_depth(store_ctx);
+
+    /* If the certificate was found to be invalid, get the error and convert
+       it to something our caller will understand. */
+ if (! cert_valid) {
+ err = X509_STORE_CTX_get_error(store_ctx);
+
+ switch(err) {
+ case X509_V_ERR_CERT_NOT_YET_VALID:
+ failures |= SERF_SSL_CERT_NOTYETVALID;
+ break;
+ case X509_V_ERR_CERT_HAS_EXPIRED:
+ failures |= SERF_SSL_CERT_EXPIRED;
+ break;
+ case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT:
+ case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN:
+ failures |= SERF_SSL_CERT_SELF_SIGNED;
+ break;
+ case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY:
+ case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
+ case X509_V_ERR_CERT_UNTRUSTED:
+ case X509_V_ERR_INVALID_CA:
+ failures |= SERF_SSL_CERT_UNKNOWNCA;
+ break;
+ default:
+ failures |= SERF_SSL_CERT_UNKNOWN_FAILURE;
+ break;
+ }
+ }
+
+ /* Check certificate expiry dates. */
+ if (X509_cmp_current_time(X509_get_notBefore(server_cert)) >= 0) {
+ failures |= SERF_SSL_CERT_NOTYETVALID;
+ }
+ else if (X509_cmp_current_time(X509_get_notAfter(server_cert)) <= 0) {
+ failures |= SERF_SSL_CERT_EXPIRED;
+ }
+
+ if (ctx->server_cert_callback &&
+ (depth == 0 || failures)) {
+ apr_status_t status;
+ serf_ssl_certificate_t *cert;
+ apr_pool_t *subpool;
+
+ apr_pool_create(&subpool, ctx->pool);
+
+ cert = apr_palloc(subpool, sizeof(serf_ssl_certificate_t));
+ cert->ssl_cert = server_cert;
+ cert->depth = depth;
+
+ /* Callback for further verification. */
+ status = ctx->server_cert_callback(ctx->server_cert_userdata,
+ failures, cert);
+ if (status == APR_SUCCESS)
+ cert_valid = 1;
+ else
+ /* Pass the error back to the caller through the context-run. */
+ ctx->pending_err = status;
+ apr_pool_destroy(subpool);
+ }
+
+ if (ctx->server_cert_chain_callback
+ && (depth == 0 || failures)) {
+ apr_status_t status;
+ STACK_OF(X509) *chain;
+ const serf_ssl_certificate_t **certs;
+ int certs_len;
+ apr_pool_t *subpool;
+
+ apr_pool_create(&subpool, ctx->pool);
+
+ /* Borrow the chain to pass to the callback. */
+ chain = X509_STORE_CTX_get_chain(store_ctx);
+
+ /* If the chain can't be retrieved, just pass the current
+ certificate. */
+ /* ### can this actually happen with _get_chain() ? */
+ if (!chain) {
+ serf_ssl_certificate_t *cert = apr_palloc(subpool, sizeof(*cert));
+
+ cert->ssl_cert = server_cert;
+ cert->depth = depth;
+
+ /* Room for the server_cert and a trailing NULL. */
+ certs = apr_palloc(subpool, sizeof(*certs) * 2);
+ certs[0] = cert;
+
+ certs_len = 1;
+ } else {
+ int i;
+
+ certs_len = sk_X509_num(chain);
+
+ /* Room for all the certs and a trailing NULL. */
+ certs = apr_palloc(subpool, sizeof(*certs) * (certs_len + 1));
+ for (i = 0; i < certs_len; ++i) {
+ serf_ssl_certificate_t *cert;
+
+ cert = apr_palloc(subpool, sizeof(*cert));
+ cert->ssl_cert = sk_X509_value(chain, i);
+ cert->depth = i;
+
+ certs[i] = cert;
+ }
+ }
+ certs[certs_len] = NULL;
+
+ /* Callback for further verification. */
+ status = ctx->server_cert_chain_callback(ctx->server_cert_userdata,
+ failures, depth,
+ certs, certs_len);
+ if (status == APR_SUCCESS) {
+ cert_valid = 1;
+ } else {
+ /* Pass the error back to the caller through the context-run. */
+ ctx->pending_err = status;
+ }
+
+ apr_pool_destroy(subpool);
+ }
+
+ return cert_valid;
+}
+
+/* This function reads an encrypted stream and returns the decrypted stream. */
+static apr_status_t ssl_decrypt(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ serf_ssl_context_t *ctx = baton;
+ apr_size_t priv_len;
+ apr_status_t status;
+ const char *data;
+ int ssl_len;
+
+#ifdef SSL_VERBOSE
+ printf("ssl_decrypt: begin %d\n", bufsize);
+#endif
+
+ /* Is there some data waiting to be read? */
+ ssl_len = SSL_read(ctx->ssl, buf, bufsize);
+ if (ssl_len > 0) {
+#ifdef SSL_VERBOSE
+ printf("ssl_decrypt: %d bytes (%d); status: %d; flags: %d\n",
+ ssl_len, bufsize, ctx->decrypt.status,
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ *len = ssl_len;
+ return APR_SUCCESS;
+ }
+
+ status = serf_bucket_read(ctx->decrypt.stream, bufsize, &data, &priv_len);
+
+ if (!SERF_BUCKET_READ_ERROR(status) && priv_len) {
+ serf_bucket_t *tmp;
+
+#ifdef SSL_VERBOSE
+ printf("ssl_decrypt: read %d bytes (%d); status: %d\n", priv_len,
+ bufsize, status);
+#endif
+
+ tmp = serf_bucket_simple_copy_create(data, priv_len,
+ ctx->decrypt.pending->allocator);
+
+ serf_bucket_aggregate_append(ctx->decrypt.pending, tmp);
+
+ ssl_len = SSL_read(ctx->ssl, buf, bufsize);
+ if (ssl_len < 0) {
+ int ssl_err;
+
+ ssl_err = SSL_get_error(ctx->ssl, ssl_len);
+ switch (ssl_err) {
+ case SSL_ERROR_SYSCALL:
+ *len = 0;
+ status = ctx->decrypt.status;
+ break;
+ case SSL_ERROR_WANT_READ:
+ *len = 0;
+ status = APR_EAGAIN;
+ break;
+ case SSL_ERROR_SSL:
+ *len = 0;
+ status = ctx->pending_err ? ctx->pending_err : APR_EGENERAL;
+ ctx->pending_err = 0;
+ break;
+ default:
+ *len = 0;
+ status = APR_EGENERAL;
+ break;
+ }
+ }
+ else {
+ *len = ssl_len;
+#ifdef SSL_VERBOSE
+ printf("---\n%s\n-(%d)-\n", buf, *len);
+#endif
+ }
+ }
+ else {
+ *len = 0;
+ }
+#ifdef SSL_VERBOSE
+ printf("ssl_decrypt: %d %d %d\n", status, *len,
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ return status;
+}
+
+/* This function reads a decrypted stream and returns an encrypted stream. */
+static apr_status_t ssl_encrypt(void *baton, apr_size_t bufsize,
+ char *buf, apr_size_t *len)
+{
+ const char *data;
+ apr_size_t interim_bufsize;
+ serf_ssl_context_t *ctx = baton;
+ apr_status_t status;
+
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: begin %d\n", bufsize);
+#endif
+
+ /* Try to read already encrypted but unread data first. */
+ status = serf_bucket_read(ctx->encrypt.pending, bufsize, &data, len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ /* Aha, we read something. Return that now. */
+ if (*len) {
+ memcpy(buf, data, *len);
+ if (APR_STATUS_IS_EOF(status)) {
+ status = APR_SUCCESS;
+ }
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: %d %d %d (quick read)\n", status, *len,
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ return status;
+ }
+
+ if (BIO_should_retry(ctx->bio) && BIO_should_write(ctx->bio)) {
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: %d %d %d (should write exit)\n", status, *len,
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ return APR_EAGAIN;
+ }
+
+ /* If we were previously blocked, unblock ourselves now. */
+ if (BIO_should_read(ctx->bio)) {
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: reset %d %d (%d %d %d)\n", status,
+ ctx->encrypt.status,
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ ctx->encrypt.status = APR_SUCCESS;
+ ctx->encrypt.exhausted_reset = 0;
+ }
+
+ /* Oh well, read from our stream now. */
+ interim_bufsize = bufsize;
+ do {
+ apr_size_t interim_len;
+
+ if (!ctx->encrypt.status) {
+ struct iovec vecs[64];
+ int vecs_read;
+
+ status = serf_bucket_read_iovec(ctx->encrypt.stream,
+ interim_bufsize, 64, vecs,
+ &vecs_read);
+
+ if (!SERF_BUCKET_READ_ERROR(status) && vecs_read) {
+ char *vecs_data;
+ int i, cur, vecs_data_len;
+ int ssl_len;
+
+ /* Combine the buffers of the iovec into one buffer, as
+                   that is what SSL_write requires. */
+ vecs_data_len = 0;
+ for (i = 0; i < vecs_read; i++) {
+ vecs_data_len += vecs[i].iov_len;
+ }
+
+ vecs_data = serf_bucket_mem_alloc(ctx->allocator,
+ vecs_data_len);
+
+ cur = 0;
+ for (i = 0; i < vecs_read; i++) {
+ memcpy(vecs_data + cur, vecs[i].iov_base, vecs[i].iov_len);
+ cur += vecs[i].iov_len;
+ }
+
+ interim_bufsize -= vecs_data_len;
+ interim_len = vecs_data_len;
+
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: bucket read %d bytes; status %d\n",
+ interim_len, status);
+ printf("---\n%s\n-(%d)-\n", vecs_data, interim_len);
+#endif
+ /* Stash our status away. */
+ ctx->encrypt.status = status;
+
+ ssl_len = SSL_write(ctx->ssl, vecs_data, interim_len);
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: SSL write: %d\n", ssl_len);
+#endif
+ /* We're done. */
+ serf_bucket_mem_free(ctx->allocator, vecs_data);
+
+ /* If we failed to write... */
+ if (ssl_len < 0) {
+ int ssl_err;
+
+ /* Ah, bugger. We need to put that data back. */
+ serf_bucket_aggregate_prepend_iovec(ctx->encrypt.stream,
+ vecs, vecs_read);
+
+ ssl_err = SSL_get_error(ctx->ssl, ssl_len);
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: SSL write error: %d\n", ssl_err);
+#endif
+ if (ssl_err == SSL_ERROR_SYSCALL) {
+ status = ctx->encrypt.status;
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ }
+ else {
+ /* Oh, no. */
+ if (ssl_err == SSL_ERROR_WANT_READ) {
+ status = SERF_ERROR_WAIT_CONN;
+ }
+ else {
+ status = APR_EGENERAL;
+ }
+ }
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt: SSL write error: %d %d\n", status, *len);
+#endif
+ }
+ }
+ }
+ else {
+ interim_len = 0;
+ *len = 0;
+ status = ctx->encrypt.status;
+ }
+
+ } while (!status && interim_bufsize);
+
+ /* Okay, we exhausted our underlying stream. */
+ if (!SERF_BUCKET_READ_ERROR(status)) {
+ apr_status_t agg_status;
+ struct iovec vecs[64];
+ int vecs_read, i;
+
+ /* We read something! */
+ agg_status = serf_bucket_read_iovec(ctx->encrypt.pending, bufsize,
+ 64, vecs, &vecs_read);
+ *len = 0;
+ for (i = 0; i < vecs_read; i++) {
+ memcpy(buf + *len, vecs[i].iov_base, vecs[i].iov_len);
+ *len += vecs[i].iov_len;
+ }
+
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt read agg: %d %d %d %d\n", status, agg_status,
+ ctx->encrypt.status, *len);
+#endif
+
+ if (!agg_status) {
+ status = agg_status;
+ }
+ }
+
+ if (status == SERF_ERROR_WAIT_CONN
+ && BIO_should_retry(ctx->bio) && BIO_should_read(ctx->bio)) {
+ ctx->encrypt.exhausted = ctx->encrypt.status;
+ ctx->encrypt.status = SERF_ERROR_WAIT_CONN;
+ }
+
+#ifdef SSL_VERBOSE
+ printf("ssl_encrypt finished: %d %d (%d %d %d)\n", status, *len,
+ BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
+ BIO_get_retry_flags(ctx->bio));
+#endif
+ return status;
+}
+
+#if APR_HAS_THREADS
+static apr_pool_t *ssl_pool;
+static apr_thread_mutex_t **ssl_locks;
+
+typedef struct CRYPTO_dynlock_value {
+ apr_thread_mutex_t *lock;
+} CRYPTO_dynlock_value;
+
+static CRYPTO_dynlock_value *ssl_dyn_create(const char* file, int line)
+{
+ CRYPTO_dynlock_value *l;
+ apr_status_t rv;
+
+ l = apr_palloc(ssl_pool, sizeof(CRYPTO_dynlock_value));
+ rv = apr_thread_mutex_create(&l->lock, APR_THREAD_MUTEX_DEFAULT, ssl_pool);
+ if (rv != APR_SUCCESS) {
+ /* FIXME: return error here */
+ }
+ return l;
+}
+
+static void ssl_dyn_lock(int mode, CRYPTO_dynlock_value *l, const char *file,
+ int line)
+{
+ if (mode & CRYPTO_LOCK) {
+ apr_thread_mutex_lock(l->lock);
+ }
+ else if (mode & CRYPTO_UNLOCK) {
+ apr_thread_mutex_unlock(l->lock);
+ }
+}
+
+static void ssl_dyn_destroy(CRYPTO_dynlock_value *l, const char *file,
+ int line)
+{
+ apr_thread_mutex_destroy(l->lock);
+}
+
+static void ssl_lock(int mode, int n, const char *file, int line)
+{
+ if (mode & CRYPTO_LOCK) {
+ apr_thread_mutex_lock(ssl_locks[n]);
+ }
+ else if (mode & CRYPTO_UNLOCK) {
+ apr_thread_mutex_unlock(ssl_locks[n]);
+ }
+}
+
+static unsigned long ssl_id(void)
+{
+ /* FIXME: This is lame and not portable. -aaron */
+ return (unsigned long) apr_os_thread_current();
+}
+
+static apr_status_t cleanup_ssl(void *data)
+{
+ CRYPTO_set_locking_callback(NULL);
+ CRYPTO_set_id_callback(NULL);
+ CRYPTO_set_dynlock_create_callback(NULL);
+ CRYPTO_set_dynlock_lock_callback(NULL);
+ CRYPTO_set_dynlock_destroy_callback(NULL);
+
+ return APR_SUCCESS;
+}
+
+#endif
+
+static apr_uint32_t have_init_ssl = 0;
+
+static void init_ssl_libraries(void)
+{
+ apr_uint32_t val;
+#if APR_VERSION_AT_LEAST(1,0,0)
+ val = apr_atomic_xchg32(&have_init_ssl, 1);
+#else
+ val = apr_atomic_cas(&have_init_ssl, 1, 0);
+#endif
+
+ if (!val) {
+#if APR_HAS_THREADS
+ int i, numlocks;
+#endif
+ CRYPTO_malloc_init();
+ ERR_load_crypto_strings();
+ SSL_load_error_strings();
+ SSL_library_init();
+ OpenSSL_add_all_algorithms();
+
+#if APR_HAS_THREADS
+ numlocks = CRYPTO_num_locks();
+ apr_pool_create(&ssl_pool, NULL);
+ ssl_locks = apr_palloc(ssl_pool, sizeof(apr_thread_mutex_t*)*numlocks);
+ for (i = 0; i < numlocks; i++) {
+ apr_status_t rv;
+
+ /* Intraprocess locks don't /need/ a filename... */
+ rv = apr_thread_mutex_create(&ssl_locks[i],
+ APR_THREAD_MUTEX_DEFAULT, ssl_pool);
+ if (rv != APR_SUCCESS) {
+ /* FIXME: error out here */
+ }
+ }
+ CRYPTO_set_locking_callback(ssl_lock);
+ CRYPTO_set_id_callback(ssl_id);
+ CRYPTO_set_dynlock_create_callback(ssl_dyn_create);
+ CRYPTO_set_dynlock_lock_callback(ssl_dyn_lock);
+ CRYPTO_set_dynlock_destroy_callback(ssl_dyn_destroy);
+
+ apr_pool_cleanup_register(ssl_pool, NULL, cleanup_ssl, cleanup_ssl);
+#endif
+ }
+}
+
+static int ssl_need_client_cert(SSL *ssl, X509 **cert, EVP_PKEY **pkey)
+{
+ serf_ssl_context_t *ctx = SSL_get_app_data(ssl);
+ apr_status_t status;
+
+ if (ctx->cached_cert) {
+ *cert = ctx->cached_cert;
+ *pkey = ctx->cached_cert_pw;
+ return 1;
+ }
+
+ while (ctx->cert_callback) {
+ const char *cert_path;
+ apr_file_t *cert_file;
+ BIO *bio;
+ PKCS12 *p12;
+ int i;
+ int retrying_success = 0;
+
+ if (ctx->cert_file_success) {
+ status = APR_SUCCESS;
+ cert_path = ctx->cert_file_success;
+ ctx->cert_file_success = NULL;
+ retrying_success = 1;
+ } else {
+ status = ctx->cert_callback(ctx->cert_userdata, &cert_path);
+ }
+
+ if (status || !cert_path) {
+ break;
+ }
+
+ /* Load the x.509 cert file stored in PKCS12 */
+ status = apr_file_open(&cert_file, cert_path, APR_READ, APR_OS_DEFAULT,
+ ctx->pool);
+
+ if (status) {
+ continue;
+ }
+
+ bio = BIO_new(&bio_file_method);
+ bio->ptr = cert_file;
+
+ ctx->cert_path = cert_path;
+ p12 = d2i_PKCS12_bio(bio, NULL);
+ apr_file_close(cert_file);
+
+ i = PKCS12_parse(p12, NULL, pkey, cert, NULL);
+
+ if (i == 1) {
+ PKCS12_free(p12);
+ ctx->cached_cert = *cert;
+ ctx->cached_cert_pw = *pkey;
+ if (!retrying_success && ctx->cert_cache_pool) {
+ const char *c;
+
+ c = apr_pstrdup(ctx->cert_cache_pool, ctx->cert_path);
+
+ apr_pool_userdata_setn(c, "serf:ssl:cert",
+ apr_pool_cleanup_null,
+ ctx->cert_cache_pool);
+ }
+ return 1;
+ }
+ else {
+ int err = ERR_get_error();
+ ERR_clear_error();
+ if (ERR_GET_LIB(err) == ERR_LIB_PKCS12 &&
+ ERR_GET_REASON(err) == PKCS12_R_MAC_VERIFY_FAILURE) {
+ if (ctx->cert_pw_callback) {
+ const char *password;
+
+ if (ctx->cert_pw_success) {
+ status = APR_SUCCESS;
+ password = ctx->cert_pw_success;
+ ctx->cert_pw_success = NULL;
+ } else {
+ status = ctx->cert_pw_callback(ctx->cert_pw_userdata,
+ ctx->cert_path,
+ &password);
+ }
+
+ if (!status && password) {
+ i = PKCS12_parse(p12, password, pkey, cert, NULL);
+ if (i == 1) {
+ PKCS12_free(p12);
+ ctx->cached_cert = *cert;
+ ctx->cached_cert_pw = *pkey;
+ if (!retrying_success && ctx->cert_cache_pool) {
+ const char *c;
+
+ c = apr_pstrdup(ctx->cert_cache_pool,
+ ctx->cert_path);
+
+ apr_pool_userdata_setn(c, "serf:ssl:cert",
+ apr_pool_cleanup_null,
+ ctx->cert_cache_pool);
+ }
+ if (!retrying_success && ctx->cert_pw_cache_pool) {
+ const char *c;
+
+ c = apr_pstrdup(ctx->cert_pw_cache_pool,
+ password);
+
+ apr_pool_userdata_setn(c, "serf:ssl:certpw",
+ apr_pool_cleanup_null,
+ ctx->cert_pw_cache_pool);
+ }
+ return 1;
+ }
+ }
+ }
+ PKCS12_free(p12);
+ return 0;
+ }
+ else {
+ printf("OpenSSL cert error: %d %d %d\n", ERR_GET_LIB(err),
+ ERR_GET_FUNC(err),
+ ERR_GET_REASON(err));
+ PKCS12_free(p12);
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+void serf_ssl_client_cert_provider_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_client_cert_t callback,
+ void *data,
+ void *cache_pool)
+{
+ context->cert_callback = callback;
+ context->cert_userdata = data;
+ context->cert_cache_pool = cache_pool;
+ if (context->cert_cache_pool) {
+ apr_pool_userdata_get((void**)&context->cert_file_success,
+ "serf:ssl:cert", cache_pool);
+ }
+}
+
+
+void serf_ssl_client_cert_password_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_cert_password_t callback,
+ void *data,
+ void *cache_pool)
+{
+ context->cert_pw_callback = callback;
+ context->cert_pw_userdata = data;
+ context->cert_pw_cache_pool = cache_pool;
+ if (context->cert_pw_cache_pool) {
+ apr_pool_userdata_get((void**)&context->cert_pw_success,
+ "serf:ssl:certpw", cache_pool);
+ }
+}
+
+
+void serf_ssl_server_cert_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t callback,
+ void *data)
+{
+ context->server_cert_callback = callback;
+ context->server_cert_userdata = data;
+}
+
+void serf_ssl_server_cert_chain_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t cert_callback,
+ serf_ssl_server_cert_chain_cb_t cert_chain_callback,
+ void *data)
+{
+ context->server_cert_callback = cert_callback;
+ context->server_cert_chain_callback = cert_chain_callback;
+ context->server_cert_userdata = data;
+}
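+
+/* Illustrative registration from application code (a sketch only; the
+ * names app_ssl_cb and app_baton are hypothetical, not part of this file):
+ *
+ *   serf_ssl_server_cert_callback_set(ssl_ctx, app_ssl_cb, app_baton);
+ *
+ * The callback receives a bitmask of verification failures plus the
+ * certificate in question and returns APR_SUCCESS to accept it.
+ */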
+
+static serf_ssl_context_t *ssl_init_context(void)
+{
+ serf_ssl_context_t *ssl_ctx;
+ apr_pool_t *pool;
+ serf_bucket_alloc_t *allocator;
+
+ init_ssl_libraries();
+
+ apr_pool_create(&pool, NULL);
+ allocator = serf_bucket_allocator_create(pool, NULL, NULL);
+
+ ssl_ctx = serf_bucket_mem_alloc(allocator, sizeof(*ssl_ctx));
+
+ ssl_ctx->refcount = 0;
+ ssl_ctx->pool = pool;
+ ssl_ctx->allocator = allocator;
+
+ ssl_ctx->ctx = SSL_CTX_new(SSLv23_client_method());
+
+ SSL_CTX_set_client_cert_cb(ssl_ctx->ctx, ssl_need_client_cert);
+ ssl_ctx->cached_cert = 0;
+ ssl_ctx->cached_cert_pw = 0;
+ ssl_ctx->pending_err = APR_SUCCESS;
+
+ ssl_ctx->cert_callback = NULL;
+ ssl_ctx->cert_pw_callback = NULL;
+ ssl_ctx->server_cert_callback = NULL;
+ ssl_ctx->server_cert_chain_callback = NULL;
+
+ SSL_CTX_set_verify(ssl_ctx->ctx, SSL_VERIFY_PEER,
+ validate_server_certificate);
+ SSL_CTX_set_options(ssl_ctx->ctx, SSL_OP_ALL);
+
+ ssl_ctx->ssl = SSL_new(ssl_ctx->ctx);
+ ssl_ctx->bio = BIO_new(&bio_bucket_method);
+ ssl_ctx->bio->ptr = ssl_ctx;
+
+ SSL_set_bio(ssl_ctx->ssl, ssl_ctx->bio, ssl_ctx->bio);
+
+ SSL_set_connect_state(ssl_ctx->ssl);
+
+ SSL_set_app_data(ssl_ctx->ssl, ssl_ctx);
+
+ ssl_ctx->encrypt.stream = NULL;
+ ssl_ctx->encrypt.stream_next = NULL;
+ ssl_ctx->encrypt.pending = serf_bucket_aggregate_create(allocator);
+ ssl_ctx->encrypt.status = APR_SUCCESS;
+ serf_databuf_init(&ssl_ctx->encrypt.databuf);
+ ssl_ctx->encrypt.databuf.read = ssl_encrypt;
+ ssl_ctx->encrypt.databuf.read_baton = ssl_ctx;
+
+ ssl_ctx->decrypt.stream = NULL;
+ ssl_ctx->decrypt.pending = serf_bucket_aggregate_create(allocator);
+ ssl_ctx->decrypt.status = APR_SUCCESS;
+ serf_databuf_init(&ssl_ctx->decrypt.databuf);
+ ssl_ctx->decrypt.databuf.read = ssl_decrypt;
+ ssl_ctx->decrypt.databuf.read_baton = ssl_ctx;
+
+ return ssl_ctx;
+}
+
+static apr_status_t ssl_free_context(
+ serf_ssl_context_t *ssl_ctx)
+{
+ apr_pool_t *p;
+
+    /* If we never had the pending buckets, don't try to free them. */
+ if (ssl_ctx->decrypt.pending != NULL) {
+ serf_bucket_destroy(ssl_ctx->decrypt.pending);
+ }
+ if (ssl_ctx->encrypt.pending != NULL) {
+ serf_bucket_destroy(ssl_ctx->encrypt.pending);
+ }
+
+ /* SSL_free implicitly frees the underlying BIO. */
+ SSL_free(ssl_ctx->ssl);
+ SSL_CTX_free(ssl_ctx->ctx);
+
+ p = ssl_ctx->pool;
+
+ serf_bucket_mem_free(ssl_ctx->allocator, ssl_ctx);
+ apr_pool_destroy(p);
+
+ return APR_SUCCESS;
+}
+
+static serf_bucket_t * serf_bucket_ssl_create(
+ serf_ssl_context_t *ssl_ctx,
+ serf_bucket_alloc_t *allocator,
+ const serf_bucket_type_t *type)
+{
+ ssl_context_t *ctx;
+
+ ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
+ if (!ssl_ctx) {
+ ctx->ssl_ctx = ssl_init_context();
+ }
+ else {
+ ctx->ssl_ctx = ssl_ctx;
+ }
+ ctx->ssl_ctx->refcount++;
+
+ return serf_bucket_create(type, allocator, ctx);
+}
+
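+/* Note: SSL_set_tlsext_host_name is only available when OpenSSL is built
+ * with TLS extension (SNI) support; without it this call is a no-op that
+ * still returns APR_SUCCESS. */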
+apr_status_t serf_ssl_set_hostname(serf_ssl_context_t *context,
+ const char * hostname)
+{
+#ifdef SSL_set_tlsext_host_name
+ if (SSL_set_tlsext_host_name(context->ssl, hostname) != 1) {
+ ERR_clear_error();
+ }
+#endif
+ return APR_SUCCESS;
+}
+
+apr_status_t serf_ssl_use_default_certificates(serf_ssl_context_t *ssl_ctx)
+{
+ X509_STORE *store = SSL_CTX_get_cert_store(ssl_ctx->ctx);
+
+ int result = X509_STORE_set_default_paths(store);
+
+ return result ? APR_SUCCESS : APR_EGENERAL;
+}
+
+apr_status_t serf_ssl_load_cert_file(
+ serf_ssl_certificate_t **cert,
+ const char *file_path,
+ apr_pool_t *pool)
+{
+ FILE *fp = fopen(file_path, "r");
+
+ if (fp) {
+ X509 *ssl_cert = PEM_read_X509(fp, NULL, NULL, NULL);
+ fclose(fp);
+
+ if (ssl_cert) {
+ *cert = apr_palloc(pool, sizeof(serf_ssl_certificate_t));
+ (*cert)->ssl_cert = ssl_cert;
+
+ return APR_SUCCESS;
+ }
+ }
+
+ return APR_EGENERAL;
+}
+
+
+apr_status_t serf_ssl_trust_cert(
+ serf_ssl_context_t *ssl_ctx,
+ serf_ssl_certificate_t *cert)
+{
+ X509_STORE *store = SSL_CTX_get_cert_store(ssl_ctx->ctx);
+
+ int result = X509_STORE_add_cert(store, cert->ssl_cert);
+
+ return result ? APR_SUCCESS : APR_EGENERAL;
+}
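+
+/* Typical caller flow (an illustrative sketch; the PEM path, pool and
+ * ssl_ctx are assumed to come from the application):
+ *
+ *   serf_ssl_certificate_t *ca_cert;
+ *   if (serf_ssl_load_cert_file(&ca_cert, "/path/to/ca.pem", pool)
+ *           == APR_SUCCESS) {
+ *       serf_ssl_trust_cert(ssl_ctx, ca_cert);
+ *   }
+ */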
+
+
+serf_bucket_t *serf_bucket_ssl_decrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *bkt;
+ ssl_context_t *ctx;
+
+ bkt = serf_bucket_ssl_create(ssl_ctx, allocator,
+ &serf_bucket_type_ssl_decrypt);
+
+ ctx = bkt->data;
+
+ ctx->databuf = &ctx->ssl_ctx->decrypt.databuf;
+ if (ctx->ssl_ctx->decrypt.stream != NULL) {
+ return NULL;
+ }
+ ctx->ssl_ctx->decrypt.stream = stream;
+ ctx->our_stream = &ctx->ssl_ctx->decrypt.stream;
+
+ return bkt;
+}
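+
+/* Sketch of typical wiring (illustrative; conn_stream is a hypothetical
+ * socket bucket owned by the caller):
+ *
+ *   bkt = serf_bucket_ssl_decrypt_create(conn_stream, NULL, allocator);
+ *   ssl_ctx = serf_bucket_ssl_decrypt_context_get(bkt);
+ *
+ * Passing NULL for ssl_ctx allocates a fresh serf_ssl_context_t; passing a
+ * context whose decrypt stream is already set makes this function return
+ * NULL.
+ */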
+
+
+serf_ssl_context_t *serf_bucket_ssl_decrypt_context_get(
+ serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+ return ctx->ssl_ctx;
+}
+
+
+serf_bucket_t *serf_bucket_ssl_encrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_ctx,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *bkt;
+ ssl_context_t *ctx;
+
+ bkt = serf_bucket_ssl_create(ssl_ctx, allocator,
+ &serf_bucket_type_ssl_encrypt);
+
+ ctx = bkt->data;
+
+ ctx->databuf = &ctx->ssl_ctx->encrypt.databuf;
+ ctx->our_stream = &ctx->ssl_ctx->encrypt.stream;
+ if (ctx->ssl_ctx->encrypt.stream == NULL) {
+ serf_bucket_t *tmp = serf_bucket_aggregate_create(stream->allocator);
+ serf_bucket_aggregate_append(tmp, stream);
+ ctx->ssl_ctx->encrypt.stream = tmp;
+ }
+ else {
+ bucket_list_t *new_list;
+
+ new_list = serf_bucket_mem_alloc(ctx->ssl_ctx->allocator,
+ sizeof(*new_list));
+ new_list->bucket = stream;
+ new_list->next = NULL;
+ if (ctx->ssl_ctx->encrypt.stream_next == NULL) {
+ ctx->ssl_ctx->encrypt.stream_next = new_list;
+ }
+ else {
+ bucket_list_t *scan = ctx->ssl_ctx->encrypt.stream_next;
+
+ while (scan->next != NULL)
+ scan = scan->next;
+ scan->next = new_list;
+ }
+ }
+
+ return bkt;
+}
+
+
+serf_ssl_context_t *serf_bucket_ssl_encrypt_context_get(
+ serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+ return ctx->ssl_ctx;
+}
+
+/* Functions to read a serf_ssl_certificate structure. */
+
+/* Creates a hash_table with keys (E, CN, OU, O, L, ST and C). */
+static apr_hash_t *
+convert_X509_NAME_to_table(X509_NAME *org, apr_pool_t *pool)
+{
+ char buf[1024];
+ int ret;
+
+ apr_hash_t *tgt = apr_hash_make(pool);
+
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_commonName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "CN", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_pkcs9_emailAddress,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "E", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_organizationalUnitName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "OU", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_organizationName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "O", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_localityName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "L", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_stateOrProvinceName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "ST", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+ ret = X509_NAME_get_text_by_NID(org,
+ NID_countryName,
+ buf, 1024);
+ if (ret != -1)
+ apr_hash_set(tgt, "C", APR_HASH_KEY_STRING, apr_pstrdup(pool, buf));
+
+ return tgt;
+}
+
+
+int serf_ssl_cert_depth(const serf_ssl_certificate_t *cert)
+{
+ return cert->depth;
+}
+
+
+apr_hash_t *serf_ssl_cert_issuer(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ X509_NAME *issuer = X509_get_issuer_name(cert->ssl_cert);
+
+ if (!issuer)
+ return NULL;
+
+ return convert_X509_NAME_to_table(issuer, pool);
+}
+
+
+apr_hash_t *serf_ssl_cert_subject(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ X509_NAME *subject = X509_get_subject_name(cert->ssl_cert);
+
+ if (!subject)
+ return NULL;
+
+ return convert_X509_NAME_to_table(subject, pool);
+}
+
+
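+/* The returned hash contains the keys "sha1" (colon-separated fingerprint),
+ * "notBefore", "notAfter" and, when the certificate carries DNS
+ * subjectAltName entries, "subjectAltName" (an apr_array_header_t of
+ * strings). */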
+apr_hash_t *serf_ssl_cert_certificate(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ apr_hash_t *tgt = apr_hash_make(pool);
+ unsigned int md_size, i;
+ unsigned char md[EVP_MAX_MD_SIZE];
+ BIO *bio;
+ STACK_OF(GENERAL_NAME) *names;
+
+ /* sha1 fingerprint */
+ if (X509_digest(cert->ssl_cert, EVP_sha1(), md, &md_size)) {
+ const char hex[] = "0123456789ABCDEF";
+ char fingerprint[EVP_MAX_MD_SIZE * 3];
+
+ for (i=0; i<md_size; i++) {
+ fingerprint[3*i] = hex[(md[i] & 0xf0) >> 4];
+ fingerprint[(3*i)+1] = hex[(md[i] & 0x0f)];
+ fingerprint[(3*i)+2] = ':';
+ }
+ if (md_size > 0)
+ fingerprint[(3*(md_size-1))+2] = '\0';
+ else
+ fingerprint[0] = '\0';
+
+ apr_hash_set(tgt, "sha1", APR_HASH_KEY_STRING,
+ apr_pstrdup(pool, fingerprint));
+ }
+
+    /* set validity dates (notBefore / notAfter) */
+ bio = BIO_new(BIO_s_mem());
+ if (bio) {
+ ASN1_TIME *notBefore, *notAfter;
+ char buf[256];
+
+ memset (buf, 0, sizeof (buf));
+ notBefore = X509_get_notBefore(cert->ssl_cert);
+ if (ASN1_TIME_print(bio, notBefore)) {
+ BIO_read(bio, buf, 255);
+ apr_hash_set(tgt, "notBefore", APR_HASH_KEY_STRING,
+ apr_pstrdup(pool, buf));
+ }
+ memset (buf, 0, sizeof (buf));
+ notAfter = X509_get_notAfter(cert->ssl_cert);
+ if (ASN1_TIME_print(bio, notAfter)) {
+ BIO_read(bio, buf, 255);
+ apr_hash_set(tgt, "notAfter", APR_HASH_KEY_STRING,
+ apr_pstrdup(pool, buf));
+ }
+ }
+ BIO_free(bio);
+
+ /* Get subjectAltNames */
+ names = X509_get_ext_d2i(cert->ssl_cert, NID_subject_alt_name, NULL, NULL);
+ if (names) {
+ int names_count = sk_GENERAL_NAME_num(names);
+
+ apr_array_header_t *san_arr = apr_array_make(pool, names_count,
+ sizeof(char*));
+ apr_hash_set(tgt, "subjectAltName", APR_HASH_KEY_STRING, san_arr);
+ for (i = 0; i < names_count; i++) {
+ char *p = NULL;
+ GENERAL_NAME *nm = sk_GENERAL_NAME_value(names, i);
+
+ switch (nm->type) {
+ case GEN_DNS:
+ p = apr_pstrmemdup(pool, (const char *)nm->d.ia5->data,
+ nm->d.ia5->length);
+ break;
+ default:
+ /* Don't know what to do - skip. */
+ break;
+ }
+ if (p) {
+ APR_ARRAY_PUSH(san_arr, char*) = p;
+ }
+ }
+ sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free);
+ }
+
+ return tgt;
+}
+
+
+const char *serf_ssl_cert_export(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool)
+{
+ char *binary_cert;
+ char *encoded_cert;
+ int len;
+ unsigned char *unused;
+
+ /* find the length of the DER encoding. */
+ len = i2d_X509(cert->ssl_cert, NULL);
+ if (len < 0) {
+ return NULL;
+ }
+
+ binary_cert = apr_palloc(pool, len);
+ unused = (unsigned char *)binary_cert;
+ len = i2d_X509(cert->ssl_cert, &unused); /* unused is incremented */
+ if (len < 0) {
+ return NULL;
+ }
+
+ encoded_cert = apr_palloc(pool, apr_base64_encode_len(len));
+ apr_base64_encode(encoded_cert, binary_cert, len);
+
+ return encoded_cert;
+}
+
+static void serf_ssl_destroy_and_data(serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ if (!--ctx->ssl_ctx->refcount) {
+ ssl_free_context(ctx->ssl_ctx);
+ }
+
+ serf_default_destroy_and_data(bucket);
+}
+
+static void serf_ssl_decrypt_destroy_and_data(serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ serf_bucket_destroy(*ctx->our_stream);
+
+ serf_ssl_destroy_and_data(bucket);
+}
+
+static void serf_ssl_encrypt_destroy_and_data(serf_bucket_t *bucket)
+{
+ ssl_context_t *ctx = bucket->data;
+ serf_ssl_context_t *ssl_ctx = ctx->ssl_ctx;
+
+ if (ssl_ctx->encrypt.stream == *ctx->our_stream) {
+ serf_bucket_destroy(*ctx->our_stream);
+ serf_bucket_destroy(ssl_ctx->encrypt.pending);
+
+ /* Reset our encrypted status and databuf. */
+ ssl_ctx->encrypt.status = APR_SUCCESS;
+ ssl_ctx->encrypt.databuf.status = APR_SUCCESS;
+
+ /* Advance to the next stream - if we have one. */
+ if (ssl_ctx->encrypt.stream_next == NULL) {
+ ssl_ctx->encrypt.stream = NULL;
+ ssl_ctx->encrypt.pending = NULL;
+ }
+ else {
+ bucket_list_t *cur;
+
+ cur = ssl_ctx->encrypt.stream_next;
+ ssl_ctx->encrypt.stream = cur->bucket;
+ ssl_ctx->encrypt.pending =
+ serf_bucket_aggregate_create(cur->bucket->allocator);
+ ssl_ctx->encrypt.stream_next = cur->next;
+ serf_bucket_mem_free(ssl_ctx->allocator, cur);
+ }
+ }
+ else {
+ /* Ah, darn. We haven't sent this one along yet. */
+ return;
+ }
+ serf_ssl_destroy_and_data(bucket);
+}
+
+static apr_status_t serf_ssl_read(serf_bucket_t *bucket,
+ apr_size_t requested,
+ const char **data, apr_size_t *len)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ return serf_databuf_read(ctx->databuf, requested, data, len);
+}
+
+static apr_status_t serf_ssl_readline(serf_bucket_t *bucket,
+ int acceptable, int *found,
+ const char **data,
+ apr_size_t *len)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ return serf_databuf_readline(ctx->databuf, acceptable, found, data, len);
+}
+
+static apr_status_t serf_ssl_peek(serf_bucket_t *bucket,
+ const char **data,
+ apr_size_t *len)
+{
+ ssl_context_t *ctx = bucket->data;
+
+ return serf_databuf_peek(ctx->databuf, data, len);
+}
+
+
+const serf_bucket_type_t serf_bucket_type_ssl_encrypt = {
+ "SSLENCRYPT",
+ serf_ssl_read,
+ serf_ssl_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_ssl_peek,
+ serf_ssl_encrypt_destroy_and_data,
+};
+
+const serf_bucket_type_t serf_bucket_type_ssl_decrypt = {
+ "SSLDECRYPT",
+ serf_ssl_read,
+ serf_ssl_readline,
+ serf_default_read_iovec,
+ serf_default_read_for_sendfile,
+ serf_default_read_bucket,
+ serf_ssl_peek,
+ serf_ssl_decrypt_destroy_and_data,
+};
diff --git a/build/apr_common.m4 b/build/apr_common.m4
new file mode 100644
index 0000000..50dbfac
--- /dev/null
+++ b/build/apr_common.m4
@@ -0,0 +1,985 @@
+dnl -------------------------------------------------------- -*- autoconf -*-
+dnl Licensed to the Apache Software Foundation (ASF) under one or more
+dnl contributor license agreements. See the NOTICE file distributed with
+dnl this work for additional information regarding copyright ownership.
+dnl The ASF licenses this file to You under the Apache License, Version 2.0
+dnl (the "License"); you may not use this file except in compliance with
+dnl the License. You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+
+dnl
+dnl apr_common.m4: APR's general-purpose autoconf macros
+dnl
+
+dnl
+dnl APR_CONFIG_NICE(filename)
+dnl
+dnl Saves a snapshot of the configure command-line for later reuse
+dnl
+AC_DEFUN([APR_CONFIG_NICE], [
+ rm -f $1
+ cat >$1<<EOF
+#! /bin/sh
+#
+# Created by configure
+
+EOF
+ if test -n "$CC"; then
+ echo "CC=\"$CC\"; export CC" >> $1
+ fi
+ if test -n "$CFLAGS"; then
+ echo "CFLAGS=\"$CFLAGS\"; export CFLAGS" >> $1
+ fi
+ if test -n "$CPPFLAGS"; then
+ echo "CPPFLAGS=\"$CPPFLAGS\"; export CPPFLAGS" >> $1
+ fi
+ if test -n "$LDFLAGS"; then
+ echo "LDFLAGS=\"$LDFLAGS\"; export LDFLAGS" >> $1
+ fi
+ if test -n "$LTFLAGS"; then
+ echo "LTFLAGS=\"$LTFLAGS\"; export LTFLAGS" >> $1
+ fi
+ if test -n "$LIBS"; then
+ echo "LIBS=\"$LIBS\"; export LIBS" >> $1
+ fi
+ if test -n "$INCLUDES"; then
+ echo "INCLUDES=\"$INCLUDES\"; export INCLUDES" >> $1
+ fi
+ if test -n "$NOTEST_CFLAGS"; then
+ echo "NOTEST_CFLAGS=\"$NOTEST_CFLAGS\"; export NOTEST_CFLAGS" >> $1
+ fi
+ if test -n "$NOTEST_CPPFLAGS"; then
+ echo "NOTEST_CPPFLAGS=\"$NOTEST_CPPFLAGS\"; export NOTEST_CPPFLAGS" >> $1
+ fi
+ if test -n "$NOTEST_LDFLAGS"; then
+ echo "NOTEST_LDFLAGS=\"$NOTEST_LDFLAGS\"; export NOTEST_LDFLAGS" >> $1
+ fi
+ if test -n "$NOTEST_LIBS"; then
+ echo "NOTEST_LIBS=\"$NOTEST_LIBS\"; export NOTEST_LIBS" >> $1
+ fi
+
+ # Retrieve command-line arguments.
+ eval "set x $[0] $ac_configure_args"
+ shift
+
+ for arg
+ do
+ APR_EXPAND_VAR(arg, $arg)
+ echo "\"[$]arg\" \\" >> $1
+ done
+ echo '"[$]@"' >> $1
+ chmod +x $1
+])dnl
+
+dnl APR_MKDIR_P_CHECK(fallback-mkdir-p)
+dnl checks whether mkdir -p works
+AC_DEFUN([APR_MKDIR_P_CHECK], [
+ AC_CACHE_CHECK(for working mkdir -p, ac_cv_mkdir_p,[
+ test -d conftestdir && rm -rf conftestdir
+ mkdir -p conftestdir/somedir >/dev/null 2>&1
+ if test -d conftestdir/somedir; then
+ ac_cv_mkdir_p=yes
+ else
+ ac_cv_mkdir_p=no
+ fi
+ rm -rf conftestdir
+ ])
+ if test "$ac_cv_mkdir_p" = "yes"; then
+ mkdir_p="mkdir -p"
+ else
+ mkdir_p="$1"
+ fi
+])
+
+dnl
+dnl APR_SUBDIR_CONFIG(dir [, sub-package-cmdline-args, args-to-drop])
+dnl
+dnl dir: directory to find configure in
+dnl sub-package-cmdline-args: arguments to add to the invocation (optional)
+dnl args-to-drop: arguments to drop from the invocation (optional)
+dnl
+dnl Note: This macro relies on ac_configure_args being set properly.
+dnl
+dnl The args-to-drop argument is shoved into a case statement, so
+dnl multiple arguments can be separated with a |.
+dnl
+dnl Note: Older versions of autoconf do not single-quote args, while 2.54+
+dnl places quotes around every argument. So, if you want to drop the
+dnl argument called --enable-layout, you must pass the third argument as:
+dnl [--enable-layout=*|\'--enable-layout=*]
+dnl
+dnl Trying to optimize this is left as an exercise to the reader who wants
+dnl to put up with more autoconf craziness. I give up.
+dnl
+AC_DEFUN([APR_SUBDIR_CONFIG], [
+ # save our work to this point; this allows the sub-package to use it
+ AC_CACHE_SAVE
+
+ echo "configuring package in $1 now"
+ ac_popdir=`pwd`
+ apr_config_subdirs="$1"
+ test -d $1 || $mkdir_p $1
+ ac_abs_srcdir=`(cd $srcdir/$1 && pwd)`
+ cd $1
+
+changequote(, )dnl
+ # A "../" for each directory in /$config_subdirs.
+ ac_dots=`echo $apr_config_subdirs|sed -e 's%^\./%%' -e 's%[^/]$%&/%' -e 's%[^/]*/%../%g'`
+changequote([, ])dnl
+
+ # Make the cache file pathname absolute for the subdirs
+ # required to correctly handle subdirs that might actually
+ # be symlinks
+ case "$cache_file" in
+ /*) # already absolute
+ ac_sub_cache_file=$cache_file ;;
+ *) # Was relative path.
+ ac_sub_cache_file="$ac_popdir/$cache_file" ;;
+ esac
+
+ ifelse($3, [], [apr_configure_args=$ac_configure_args],[
+ apr_configure_args=
+ apr_sep=
+ for apr_configure_arg in $ac_configure_args
+ do
+ case "$apr_configure_arg" in
+ $3)
+ continue ;;
+ esac
+ apr_configure_args="$apr_configure_args$apr_sep'$apr_configure_arg'"
+ apr_sep=" "
+ done
+ ])
+
+ dnl autoconf doesn't add --silent to ac_configure_args; explicitly pass it
+ test "x$silent" = "xyes" && apr_configure_args="$apr_configure_args --silent"
+
+ dnl AC_CONFIG_SUBDIRS silences option warnings, emulate this for 2.62
+ apr_configure_args="--disable-option-checking $apr_configure_args"
+
+ dnl The eval makes quoting arguments work - specifically the second argument
+ dnl where the quoting mechanisms used is "" rather than [].
+ dnl
+ dnl We need to execute another shell because some autoconf/shell combinations
+ dnl will choke after doing repeated APR_SUBDIR_CONFIG()s. (Namely Solaris
+ dnl and autoconf-2.54+)
+ if eval $SHELL $ac_abs_srcdir/configure $apr_configure_args --cache-file=$ac_sub_cache_file --srcdir=$ac_abs_srcdir $2
+ then :
+ echo "$1 configured properly"
+ else
+ echo "configure failed for $1"
+ exit 1
+ fi
+
+ cd $ac_popdir
+
+ # grab any updates from the sub-package
+ AC_CACHE_LOAD
+])dnl
+
+dnl
+dnl APR_SAVE_THE_ENVIRONMENT(variable_name)
+dnl
+dnl Stores the variable (usually a Makefile macro) for later restoration
+dnl
+AC_DEFUN([APR_SAVE_THE_ENVIRONMENT], [
+ apr_ste_save_$1="$$1"
+])dnl
+
+dnl
+dnl APR_RESTORE_THE_ENVIRONMENT(variable_name, prefix_)
+dnl
+dnl Uses the previously saved variable content to figure out what configure
+dnl has added to the variable, moving the new bits to prefix_variable_name
+dnl and restoring the original variable contents. This makes it possible
+dnl for a user to override configure when it does something stupid.
+dnl
+AC_DEFUN([APR_RESTORE_THE_ENVIRONMENT], [
+dnl Check whether $apr_ste_save_$1 is empty or
+dnl only whitespace. The verbatim "X" is token number 1;
+dnl the following whitespace will be ignored.
+set X $apr_ste_save_$1
+if test ${#} -eq 1; then
+ $2$1="$$1"
+ $1=
+else
+ if test "x$apr_ste_save_$1" = "x$$1"; then
+ $2$1=
+ else
+ $2$1=`echo "$$1" | sed -e "s%${apr_ste_save_$1}%%"`
+ $1="$apr_ste_save_$1"
+ fi
+fi
+if test "x$silent" != "xyes"; then
+ echo " restoring $1 to \"$$1\""
+ echo " setting $2$1 to \"$$2$1\""
+fi
+AC_SUBST($2$1)
+])dnl
+
+dnl
+dnl APR_SETIFNULL(variable, value)
+dnl
+dnl Set variable iff it's currently null
+dnl
+AC_DEFUN([APR_SETIFNULL], [
+ if test -z "$$1"; then
+ test "x$silent" != "xyes" && echo " setting $1 to \"$2\""
+ $1="$2"
+ fi
+])dnl
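+dnl
+dnl Example (illustrative): APR_SETIFNULL(CC, [gcc]) provides a default
+dnl compiler while leaving a user-supplied $CC untouched.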
+
+dnl
+dnl APR_SETVAR(variable, value)
+dnl
+dnl Set variable no matter what
+dnl
+AC_DEFUN([APR_SETVAR], [
+ test "x$silent" != "xyes" && echo " forcing $1 to \"$2\""
+ $1="$2"
+])dnl
+
+dnl
+dnl APR_ADDTO(variable, value)
+dnl
+dnl Add value to variable
+dnl
+AC_DEFUN([APR_ADDTO], [
+ if test "x$$1" = "x"; then
+ test "x$silent" != "xyes" && echo " setting $1 to \"$2\""
+ $1="$2"
+ else
+ apr_addto_bugger="$2"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $$1; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to $1"
+ $1="$$1 $i"
+ fi
+ done
+ fi
+])dnl
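+dnl
+dnl Example (illustrative):
+dnl   APR_ADDTO(CPPFLAGS, [-DFOO -I/usr/local/include])
+dnl appends each flag to $CPPFLAGS only if it is not already present.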
+
+dnl
+dnl APR_REMOVEFROM(variable, value)
+dnl
+dnl Remove a value from a variable
+dnl
+AC_DEFUN([APR_REMOVEFROM], [
+ if test "x$$1" = "x$2"; then
+ test "x$silent" != "xyes" && echo " nulling $1"
+ $1=""
+ else
+ apr_new_bugger=""
+ apr_removed=0
+ for i in $$1; do
+ if test "x$i" != "x$2"; then
+ apr_new_bugger="$apr_new_bugger $i"
+ else
+ apr_removed=1
+ fi
+ done
+ if test $apr_removed = "1"; then
+ test "x$silent" != "xyes" && echo " removed \"$2\" from $1"
+ $1=$apr_new_bugger
+ fi
+ fi
+]) dnl
+
+dnl
+dnl APR_CHECK_DEFINE_FILES( symbol, header_file [header_file ...] )
+dnl
+AC_DEFUN([APR_CHECK_DEFINE_FILES], [
+ AC_CACHE_CHECK([for $1 in $2],ac_cv_define_$1,[
+ ac_cv_define_$1=no
+ for curhdr in $2
+ do
+ AC_EGREP_CPP(YES_IS_DEFINED, [
+#include <$curhdr>
+#ifdef $1
+YES_IS_DEFINED
+#endif
+ ], ac_cv_define_$1=yes)
+ done
+ ])
+ if test "$ac_cv_define_$1" = "yes"; then
+ AC_DEFINE(HAVE_$1, 1, [Define if $1 is defined])
+ fi
+])
+
+
+dnl
+dnl APR_CHECK_DEFINE(symbol, header_file)
+dnl
+AC_DEFUN([APR_CHECK_DEFINE], [
+ AC_CACHE_CHECK([for $1 in $2],ac_cv_define_$1,[
+ AC_EGREP_CPP(YES_IS_DEFINED, [
+#include <$2>
+#ifdef $1
+YES_IS_DEFINED
+#endif
+ ], ac_cv_define_$1=yes, ac_cv_define_$1=no)
+ ])
+ if test "$ac_cv_define_$1" = "yes"; then
+ AC_DEFINE(HAVE_$1, 1, [Define if $1 is defined in $2])
+ fi
+])
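+dnl
+dnl Example (illustrative): APR_CHECK_DEFINE(LOCK_EX, sys/file.h) defines
+dnl HAVE_LOCK_EX in the generated config header when the symbol is found.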
+
+dnl
+dnl APR_CHECK_APR_DEFINE( symbol )
+dnl
+AC_DEFUN([APR_CHECK_APR_DEFINE], [
+apr_old_cppflags=$CPPFLAGS
+CPPFLAGS="$CPPFLAGS $INCLUDES"
+AC_EGREP_CPP(YES_IS_DEFINED, [
+#include <apr.h>
+#if $1
+YES_IS_DEFINED
+#endif
+], ac_cv_define_$1=yes, ac_cv_define_$1=no)
+CPPFLAGS=$apr_old_cppflags
+])
+
+dnl APR_CHECK_FILE(filename); set ac_cv_file_filename to
+dnl "yes" if 'filename' is readable, else "no".
+dnl @deprecated! - use AC_CHECK_FILE instead
+AC_DEFUN([APR_CHECK_FILE], [
+dnl Pick a safe variable name
+define([apr_cvname], ac_cv_file_[]translit([$1], [./+-], [__p_]))
+AC_CACHE_CHECK([for $1], [apr_cvname],
+[if test -r $1; then
+ apr_cvname=yes
+ else
+ apr_cvname=no
+ fi])
+])
+
+define(APR_IFALLYES,[dnl
+ac_rc=yes
+for ac_spec in $1; do
+ ac_type=`echo "$ac_spec" | sed -e 's/:.*$//'`
+ ac_item=`echo "$ac_spec" | sed -e 's/^.*://'`
+ case $ac_type in
+ header )
+ ac_item=`echo "$ac_item" | sed 'y%./+-%__p_%'`
+ ac_var="ac_cv_header_$ac_item"
+ ;;
+ file )
+ ac_item=`echo "$ac_item" | sed 'y%./+-%__p_%'`
+ ac_var="ac_cv_file_$ac_item"
+ ;;
+ func ) ac_var="ac_cv_func_$ac_item" ;;
+ struct ) ac_var="ac_cv_struct_$ac_item" ;;
+ define ) ac_var="ac_cv_define_$ac_item" ;;
+ custom ) ac_var="$ac_item" ;;
+ esac
+ eval "ac_val=\$$ac_var"
+ if test ".$ac_val" != .yes; then
+ ac_rc=no
+ break
+ fi
+done
+if test ".$ac_rc" = .yes; then
+ :
+ $2
+else
+ :
+ $3
+fi
+])
+
+
+define(APR_BEGIN_DECISION,[dnl
+ac_decision_item='$1'
+ac_decision_msg='FAILED'
+ac_decision=''
+])
+
+
+AC_DEFUN([APR_DECIDE],[dnl
+dnl Define the flag (or not) in apr_private.h via autoheader
+AH_TEMPLATE($1, [Define if $2 will be used])
+ac_decision='$1'
+ac_decision_msg='$2'
+ac_decision_$1=yes
+ac_decision_$1_msg='$2'
+])
+
+
+define(APR_DECISION_OVERRIDE,[dnl
+ ac_decision=''
+ for ac_item in $1; do
+ eval "ac_decision_this=\$ac_decision_${ac_item}"
+ if test ".$ac_decision_this" = .yes; then
+ ac_decision=$ac_item
+ eval "ac_decision_msg=\$ac_decision_${ac_item}_msg"
+ fi
+ done
+])
+
+
+define(APR_DECISION_FORCE,[dnl
+ac_decision="$1"
+eval "ac_decision_msg=\"\$ac_decision_${ac_decision}_msg\""
+])
+
+
+define(APR_END_DECISION,[dnl
+if test ".$ac_decision" = .; then
+ echo "[$]0:Error: decision on $ac_decision_item failed" 1>&2
+ exit 1
+else
+ if test ".$ac_decision_msg" = .; then
+ ac_decision_msg="$ac_decision"
+ fi
+ AC_DEFINE_UNQUOTED(${ac_decision_item})
+ AC_MSG_RESULT([decision on $ac_decision_item... $ac_decision_msg])
+fi
+])
+
+
+dnl
+dnl APR_CHECK_SIZEOF_EXTENDED(INCLUDES, TYPE [, CROSS_SIZE])
+dnl
+dnl A variant of AC_CHECK_SIZEOF which allows the checking of
+dnl sizes of non-builtin types
+dnl
+AC_DEFUN([APR_CHECK_SIZEOF_EXTENDED],
+[changequote(<<, >>)dnl
+dnl The name to #define.
+define(<<AC_TYPE_NAME>>, translit(sizeof_$2, [a-z *], [A-Z_P]))dnl
+dnl The cache variable name.
+define(<<AC_CV_NAME>>, translit(ac_cv_sizeof_$2, [ *], [_p]))dnl
+changequote([, ])dnl
+AC_MSG_CHECKING(size of $2)
+AC_CACHE_VAL(AC_CV_NAME,
+[AC_TRY_RUN([#include <stdio.h>
+$1
+main()
+{
+ FILE *f=fopen("conftestval", "w");
+ if (!f) exit(1);
+ fprintf(f, "%d\n", sizeof($2));
+ exit(0);
+}], AC_CV_NAME=`cat conftestval`, AC_CV_NAME=0, ifelse([$3],,,
+AC_CV_NAME=$3))])dnl
+AC_MSG_RESULT($AC_CV_NAME)
+AC_DEFINE_UNQUOTED(AC_TYPE_NAME, $AC_CV_NAME, [The size of ]$2)
+undefine([AC_TYPE_NAME])dnl
+undefine([AC_CV_NAME])dnl
+])
+
+
+dnl
+dnl APR_TRY_COMPILE_NO_WARNING(INCLUDES, FUNCTION-BODY,
+dnl [ACTIONS-IF-NO-WARNINGS], [ACTIONS-IF-WARNINGS])
+dnl
+dnl Tries a compile test with warnings activated so that the result
+dnl is false if the code doesn't compile cleanly. For compilers
+dnl where it is not known how to activate a "fail-on-error" mode,
+dnl it is undefined which of the sets of actions will be run.
+dnl
+AC_DEFUN([APR_TRY_COMPILE_NO_WARNING],
+[apr_save_CFLAGS=$CFLAGS
+ CFLAGS="$CFLAGS $CFLAGS_WARN"
+ if test "$ac_cv_prog_gcc" = "yes"; then
+ CFLAGS="$CFLAGS -Werror"
+ fi
+ AC_COMPILE_IFELSE(
+ [AC_LANG_SOURCE(
+ [#include "confdefs.h"
+ ]
+ [[$1]]
+ [int main(int argc, const char *const *argv) {]
+ [[$2]]
+ [ return 0; }]
+ )],
+ [$3], [$4])
+ CFLAGS=$apr_save_CFLAGS
+])
+
+dnl
+dnl APR_CHECK_STRERROR_R_RC
+dnl
+dnl Decide which style of retcode is used by this system's
+dnl strerror_r(). It either returns int (0 for success, -1
+dnl for failure), or it returns a pointer to the error
+dnl string.
+dnl
+dnl
+AC_DEFUN([APR_CHECK_STRERROR_R_RC], [
+AC_MSG_CHECKING(for type of return code from strerror_r)
+AC_TRY_RUN([
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+main()
+{
+ char buf[1024];
+ if (strerror_r(ERANGE, buf, sizeof buf) < 1) {
+ exit(0);
+ }
+ else {
+ exit(1);
+ }
+}], [
+ ac_cv_strerror_r_rc_int=yes ], [
+ ac_cv_strerror_r_rc_int=no ], [
+ ac_cv_strerror_r_rc_int=no ] )
+if test "x$ac_cv_strerror_r_rc_int" = xyes; then
+ AC_DEFINE(STRERROR_R_RC_INT, 1, [Define if strerror returns int])
+ msg="int"
+else
+ msg="pointer"
+fi
+AC_MSG_RESULT([$msg])
+] )
+
+dnl
+dnl APR_CHECK_DIRENT_INODE
+dnl
+dnl Decide if d_fileno or d_ino are available in the dirent
+dnl structure on this platform. Single UNIX Spec says d_ino,
+dnl BSD uses d_fileno. Undef to find the real beast.
+dnl
+AC_DEFUN([APR_CHECK_DIRENT_INODE], [
+AC_CACHE_CHECK([for inode member of struct dirent], apr_cv_dirent_inode, [
+apr_cv_dirent_inode=no
+AC_TRY_COMPILE([
+#include <sys/types.h>
+#include <dirent.h>
+],[
+#ifdef d_ino
+#undef d_ino
+#endif
+struct dirent de; de.d_fileno;
+], apr_cv_dirent_inode=d_fileno)
+if test "$apr_cv_dirent_inode" = "no"; then
+AC_TRY_COMPILE([
+#include <sys/types.h>
+#include <dirent.h>
+],[
+#ifdef d_fileno
+#undef d_fileno
+#endif
+struct dirent de; de.d_ino;
+], apr_cv_dirent_inode=d_ino)
+fi
+])
+if test "$apr_cv_dirent_inode" != "no"; then
+ AC_DEFINE_UNQUOTED(DIRENT_INODE, $apr_cv_dirent_inode,
+ [Define if struct dirent has an inode member])
+fi
+])
+
+dnl
+dnl APR_CHECK_DIRENT_TYPE
+dnl
+dnl Decide if d_type is available in the dirent structure
+dnl on this platform. Not part of the Single UNIX Spec.
+dnl Note that this is worthless without DT_xxx macros, so
+dnl look for one while we are at it.
+dnl
+AC_DEFUN([APR_CHECK_DIRENT_TYPE], [
+AC_CACHE_CHECK([for file type member of struct dirent], apr_cv_dirent_type,[
+apr_cv_dirent_type=no
+AC_TRY_COMPILE([
+#include <sys/types.h>
+#include <dirent.h>
+],[
+struct dirent de; de.d_type = DT_REG;
+], apr_cv_dirent_type=d_type)
+])
+if test "$apr_cv_dirent_type" != "no"; then
+ AC_DEFINE_UNQUOTED(DIRENT_TYPE, $apr_cv_dirent_type,
+ [Define if struct dirent has a d_type member])
+fi
+])
+
+dnl the following is a newline, a space, a tab, and a backslash (the
+dnl backslash is used by the shell to skip newlines, but m4 sees it;
+dnl treat it like whitespace).
+dnl WARNING: don't reindent these lines, or the space/tab will be lost!
+define([apr_whitespace],[
+ \])
+
+dnl
+dnl APR_COMMA_ARGS(ARG1 ...)
+dnl convert the whitespace-separated arguments into comma-separated
+dnl arguments.
+dnl
+dnl APR_FOREACH(CODE-BLOCK, ARG1, ARG2, ...)
+dnl substitute CODE-BLOCK for each ARG[i]. "eachval" will be set to ARG[i]
+dnl within each iteration.
+dnl
+changequote({,})
+define({APR_COMMA_ARGS},{patsubst([$}{1],[[}apr_whitespace{]+],[,])})
+define({APR_FOREACH},
+ {ifelse($}{2,,,
+ [define([eachval],
+ $}{2)$}{1[]APR_FOREACH([$}{1],
+ builtin([shift],
+ builtin([shift], $}{@)))])})
+changequote([,])
+
+dnl APR_FLAG_HEADERS(HEADER-FILE ... [, FLAG-TO-SET ] [, "yes" ])
+dnl we set FLAG-TO-SET to 1 if we find HEADER-FILE, otherwise we set to 0
+dnl if FLAG-TO-SET is null, we automagically determine its name
+dnl by changing all "/" to "_" in the HEADER-FILE and dropping
+dnl all "." and "-" chars. If the 3rd parameter is "yes" then instead of
+dnl setting to 1 or 0, we set FLAG-TO-SET to yes or no.
+dnl
+AC_DEFUN([APR_FLAG_HEADERS], [
+AC_CHECK_HEADERS($1)
+for aprt_i in $1
+do
+ ac_safe=`echo "$aprt_i" | sed 'y%./+-%__p_%'`
+ aprt_2=`echo "$aprt_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'`
+ if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
+ eval "ifelse($2,,$aprt_2,$2)=ifelse($3,yes,yes,1)"
+ else
+ eval "ifelse($2,,$aprt_2,$2)=ifelse($3,yes,no,0)"
+ fi
+done
+])
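+dnl
+dnl Example (illustrative): APR_FLAG_HEADERS(sys/types.h) sets the shell
+dnl variable sys_typesh to 1 or 0 (name derived by the rules above).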
+
+dnl APR_FLAG_FUNCS(FUNC ... [, FLAG-TO-SET] [, "yes" ])
+dnl if FLAG-TO-SET is null, we automagically determine its name by
+dnl prepending "have_" to the function name in FUNC, otherwise
+dnl we use what's provided as FLAG-TO-SET. If the 3rd parameter
+dnl is "yes" then instead of setting to 1 or 0, we set FLAG-TO-SET
+dnl to yes or no.
+dnl
+AC_DEFUN([APR_FLAG_FUNCS], [
+AC_CHECK_FUNCS($1)
+for aprt_j in $1
+do
+ aprt_3="have_$aprt_j"
+ if eval "test \"`echo '$ac_cv_func_'$aprt_j`\" = yes"; then
+ eval "ifelse($2,,$aprt_3,$2)=ifelse($3,yes,yes,1)"
+ else
+ eval "ifelse($2,,$aprt_3,$2)=ifelse($3,yes,no,0)"
+ fi
+done
+])
+
+dnl Iteratively interpolate the contents of the second argument
+dnl until interpolation offers no new result. Then assign the
+dnl final result to $1.
+dnl
+dnl Example:
+dnl
+dnl foo=1
+dnl bar='${foo}/2'
+dnl baz='${bar}/3'
+dnl APR_EXPAND_VAR(fraz, $baz)
+dnl $fraz is now "1/2/3"
+dnl
+AC_DEFUN([APR_EXPAND_VAR], [
+ap_last=
+ap_cur="$2"
+while test "x${ap_cur}" != "x${ap_last}";
+do
+ ap_last="${ap_cur}"
+ ap_cur=`eval "echo ${ap_cur}"`
+done
+$1="${ap_cur}"
+])
+
+dnl
+dnl Removes the value of $3 from the string in $2, strips off any leading
+dnl slashes, and returns the value in $1.
+dnl
+dnl Example:
+dnl orig_path="${prefix}/bar"
+dnl APR_PATH_RELATIVE(final_path, $orig_path, $prefix)
+dnl $final_path now contains "bar"
+AC_DEFUN([APR_PATH_RELATIVE], [
+ap_stripped=`echo $2 | sed -e "s#^$3##"`
+# check if the stripping was successful
+if test "x$2" != "x${ap_stripped}"; then
+  # it was, so strip off any leading slashes
+ $1="`echo ${ap_stripped} | sed -e 's#^/*##'`"
+else
+  # it wasn't, so return the original
+ $1="$2"
+fi
+])
+
+dnl APR_HELP_STRING(LHS, RHS)
+dnl Autoconf 2.50 can not handle substr correctly. It does have
+dnl AC_HELP_STRING, so let's try to call it if we can.
+dnl Note: this define must be on one line so that it can be properly returned
+dnl as the help string. When using this macro with a multi-line RHS, ensure
+dnl that you surround the macro invocation with []s
+AC_DEFUN([APR_HELP_STRING], [ifelse(regexp(AC_ACVERSION, 2\.1), -1, AC_HELP_STRING([$1],[$2]),[ ][$1] substr([ ],len($1))[$2])])
+
+dnl
+dnl APR_LAYOUT(configlayout, layoutname [, extravars])
+dnl
+AC_DEFUN([APR_LAYOUT], [
+ if test ! -f $srcdir/config.layout; then
+ echo "** Error: Layout file $srcdir/config.layout not found"
+ echo "** Error: Cannot use undefined layout '$LAYOUT'"
+ exit 1
+ fi
+ # Catch layout names including a slash which will otherwise
+ # confuse the heck out of the sed script.
+ case $2 in
+ */*)
+ echo "** Error: $2 is not a valid layout name"
+ exit 1 ;;
+ esac
+ pldconf=./config.pld
+ changequote({,})
+ sed -e "1s/[ ]*<[lL]ayout[ ]*$2[ ]*>[ ]*//;1t" \
+ -e "1,/[ ]*<[lL]ayout[ ]*$2[ ]*>[ ]*/d" \
+ -e '/[ ]*<\/Layout>[ ]*/,$d' \
+ -e "s/^[ ]*//g" \
+ -e "s/:[ ]*/=\'/g" \
+ -e "s/[ ]*$/'/g" \
+ $1 > $pldconf
+ layout_name=$2
+ if test ! -s $pldconf; then
+ echo "** Error: unable to find layout $layout_name"
+ exit 1
+ fi
+ . $pldconf
+ rm $pldconf
+ for var in prefix exec_prefix bindir sbindir libexecdir mandir \
+ sysconfdir datadir includedir localstatedir runtimedir \
+ logfiledir libdir installbuilddir libsuffix $3; do
+ eval "val=\"\$$var\""
+ case $val in
+ *+)
+ val=`echo $val | sed -e 's;\+$;;'`
+ eval "$var=\"\$val\""
+ autosuffix=yes
+ ;;
+ *)
+ autosuffix=no
+ ;;
+ esac
+ val=`echo $val | sed -e 's:\(.\)/*$:\1:'`
+ val=`echo $val | sed -e 's:[\$]\([a-z_]*\):${\1}:g'`
+ if test "$autosuffix" = "yes"; then
+ if echo $val | grep apache >/dev/null; then
+ addtarget=no
+ else
+ addtarget=yes
+ fi
+ if test "$addtarget" = "yes"; then
+ val="$val/apache2"
+ fi
+ fi
+ eval "$var='$val'"
+ done
+ changequote([,])
+])dnl
+
+dnl
+dnl APR_ENABLE_LAYOUT(default layout name [, extra vars])
+dnl
+AC_DEFUN([APR_ENABLE_LAYOUT], [
+AC_ARG_ENABLE(layout,
+[ --enable-layout=LAYOUT],[
+ LAYOUT=$enableval
+])
+
+if test -z "$LAYOUT"; then
+ LAYOUT="$1"
+fi
+APR_LAYOUT($srcdir/config.layout, $LAYOUT, $2)
+
+AC_MSG_CHECKING(for chosen layout)
+AC_MSG_RESULT($layout_name)
+])
+
+
+dnl
+dnl APR_PARSE_ARGUMENTS
+dnl a reimplementation of autoconf's argument parser, used here to allow
+dnl layouts and argument-based setups to coexist.
+AC_DEFUN([APR_PARSE_ARGUMENTS], [
+ac_prev=
+# Retrieve the command-line arguments. The eval is needed because
+# the arguments are quoted to preserve accuracy.
+eval "set x $ac_configure_args"
+shift
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval "$ac_prev=\$ac_option"
+ ac_prev=
+ continue
+ fi
+
+ ac_optarg=`expr "x$ac_option" : 'x[[^=]]*=\(.*\)'`
+
+ case $ac_option in
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir="$ac_optarg" ;;
+
+ -datadir | --datadir | --datadi | --datad | --data | --dat | --da)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
+ | --da=*)
+ datadir="$ac_optarg" ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix="$ac_optarg" ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir="$ac_optarg" ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir="$ac_optarg" ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir="$ac_optarg" ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir="$ac_optarg" ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst \
+ | --locals | --local | --loca | --loc | --lo)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* \
+ | --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
+ localstatedir="$ac_optarg" ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir="$ac_optarg" ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix="$ac_optarg" ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir="$ac_optarg" ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir="$ac_optarg" ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir="$ac_optarg" ;;
+
+ esac
+done
+
+# Be sure to have absolute paths.
+for ac_var in exec_prefix prefix
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [[\\/$]]* | ?:[[\\/]]* | NONE | '' ) ;;
+ *) AC_MSG_ERROR([expected an absolute path for --$ac_var: $ac_val]);;
+ esac
+done
+
+])dnl
+
+dnl
+dnl APR_CHECK_DEPEND
+dnl
+dnl Determine what program we can use to generate .deps-style dependencies
+dnl
+AC_DEFUN([APR_CHECK_DEPEND], [
+dnl Try to determine what depend program we can use
+dnl All GCC-variants should have -MM.
+dnl If not, then we can check on those, too.
+if test "$GCC" = "yes"; then
+ MKDEP='$(CC) -MM'
+else
+ rm -f conftest.c
+dnl <sys/types.h> should be available everywhere!
+ cat > conftest.c <<EOF
+#include <sys/types.h>
+ int main() { return 0; }
+EOF
+ MKDEP="true"
+ for i in "$CC -MM" "$CC -M" "$CPP -MM" "$CPP -M" "cpp -M"; do
+ AC_MSG_CHECKING([if $i can create proper make dependencies])
+ if $i conftest.c 2>/dev/null | grep 'conftest.o: conftest.c' >/dev/null; then
+ MKDEP=$i
+ AC_MSG_RESULT(yes)
+ break;
+ fi
+ AC_MSG_RESULT(no)
+ done
+ rm -f conftest.c
+fi
+
+AC_SUBST(MKDEP)
+])
+
+dnl
+dnl APR_CHECK_TYPES_COMPATIBLE(TYPE-1, TYPE-2, [ACTION-IF-TRUE])
+dnl
+dnl Try to determine whether two types are the same. Only works
+dnl for gcc and icc.
+dnl
+AC_DEFUN([APR_CHECK_TYPES_COMPATIBLE], [
+define([apr_cvname], apr_cv_typematch_[]translit([$1], [ ], [_])_[]translit([$2], [ ], [_]))
+AC_CACHE_CHECK([whether $1 and $2 are the same], apr_cvname, [
+AC_TRY_COMPILE(AC_INCLUDES_DEFAULT, [
+ int foo[0 - !__builtin_types_compatible_p($1, $2)];
+], [apr_cvname=yes
+$3], [apr_cvname=no])])
+])
diff --git a/build/config.guess b/build/config.guess
new file mode 100755
index 0000000..40eaed4
--- /dev/null
+++ b/build/config.guess
@@ -0,0 +1,1517 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011 Free Software Foundation, Inc.
+
+timestamp='2011-05-11'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner. Please send patches (context
+# diff format) to <config-patches@gnu.org> and include a ChangeLog
+# entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free
+Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+	# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE="alpha" ;;
+ "EV5 (21164)")
+ UNAME_MACHINE="alphaev5" ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE="alphaev56" ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE="alphapca56" ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE="alphapca57" ;;
+ "EV6 (21264)")
+ UNAME_MACHINE="alphaev6" ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE="alphaev67" ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE="alphaev69" ;;
+ "EV7 (21364)")
+ UNAME_MACHINE="alphaev7" ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE="alphaev79" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm:riscos:*:*|arm:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually anything (anything other than "atarist" or
+ # "atariste" should at least have a processor > m68000). The
+ # system name ranges from "MiNT" through "FreeMiNT" to the
+ # lowercase versions "mint" and "freemint". Finally, the system
+ # name "TOS" denotes a system which is actually not MiNT. But
+ # MiNT is downward compatible with TOS, so this should be no
+ # problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atari*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH="hppa2.0w"
+ else
+ HP_ARCH="hppa64"
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ case ${UNAME_MACHINE} in
+ pc98)
+ echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ *:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we change
+ # UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit ;;
+ arm*:Linux:*:*)
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ cris:Linux:*:*)
+ echo cris-axis-linux-gnu
+ exit ;;
+ crisv32:Linux:*:*)
+ echo crisv32-axis-linux-gnu
+ exit ;;
+ frv:Linux:*:*)
+ echo frv-unknown-linux-gnu
+ exit ;;
+ i*86:Linux:*:*)
+ LIBC=gnu
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #ifdef __dietlibc__
+ LIBC=dietlibc
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ ;;
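+ # Illustrative note (annotation, not part of the upstream script): the
+ # heredoc above produces a tiny source file in which exactly one CPU=
+ # assignment survives preprocessing, selected by the endianness macros
+ # the compiler defines. On a little-endian mips toolchain, for example,
+ #   $CC_FOR_BUILD -E $dummy.c | grep '^CPU'
+ # is expected to print CPU=mipsel, and the eval above would then report
+ # mipsel-unknown-linux-gnu.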
+ or32:Linux:*:*)
+ echo or32-unknown-linux-gnu
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-gnu
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-tilera-linux-gnu
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-gnu
+ exit ;;
+ x86_64:Linux:*:*)
+ echo x86_64-unknown-linux-gnu
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m always prints 'pc' for DJGPP, but it prints nothing about
+ # the processor, so we play it safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ case $UNAME_PROCESSOR in
+ i386)
+ eval $set_cc_for_build
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ UNAME_PROCESSOR="x86_64"
+ fi
+ fi ;;
+ unknown) UNAME_PROCESSOR=powerpc ;;
+ esac
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-?:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ c34*)
+ echo c34-convex-bsd
+ exit ;;
+ c38*)
+ echo c38-convex-bsd
+ exit ;;
+ c4*)
+ echo c4-convex-bsd
+ exit ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/build/config.sub b/build/config.sub
new file mode 100755
index 0000000..30fdca8
--- /dev/null
+++ b/build/config.sub
@@ -0,0 +1,1760 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011 Free Software Foundation, Inc.
+
+timestamp='2011-03-23'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted GNU ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
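+#
+# Illustrative examples (annotation, not part of the upstream text) of the
+# canonicalization performed below, assuming an unmodified copy of this
+# script:
+#   ./config.sub amd64-linux   =>  x86_64-pc-linux-gnu
+#   ./config.sub sun4          =>  sparc-sun-sunos4.1.1
+# The second case shows a default OS being supplied for a bare machine
+# alias; the defaults are chosen further down in this script.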
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free
+Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
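+
+# Illustrative note (annotation, not part of the upstream script): given
+# x86_64-unknown-linux-gnu, the sed above yields maybe_os=linux-gnu, which
+# matches the list, so basic_machine becomes x86_64-unknown and os becomes
+# -linux-gnu. Given i686-pc-mingw32, maybe_os=pc-mingw32 does not match,
+# so the fallback branch sets basic_machine=i686-pc and os=-mingw32.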
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray | -microblaze)
+ os=
+ basic_machine=$1
+ ;;
+ -bluegene*)
+ os=-cnk
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
+ | bfin \
+ | c4x | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | fido | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k | iq2000 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | mcore | mep | metag \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nios | nios2 \
+ | ns16k | ns32k \
+ | open8 \
+ | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
+ | pyramid \
+ | rx \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+ | ubicom32 \
+ | v850 | v850e \
+ | we32k \
+ | x86 | xc16x | xstormy16 | xtensa \
+ | z8k | z80)
+ basic_machine=$basic_machine-unknown
+ ;;
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | picochip)
+ # Motorola 68HC11/12.
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
+ | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64octeon-* | mips64octeonel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nds32-* | nds32le-* | nds32be-* \
+ | nios-* | nios2-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+ | pyramid-* \
+ | romp-* | rs6000-* | rx-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+ | tahoe-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile-* | tilegx-* \
+ | tron-* \
+ | ubicom32-* \
+ | v850-* | v850e-* | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
+ | xstormy16-* | xtensa*-* \
+ | ymp-* \
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
+ c54x-*)
+ basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16 | cr16-*)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ microblaze)
+ basic_machine=microblaze-xilinx
+ ;;
+ mingw32)
+ basic_machine=i386-pc
+ os=-mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc | ppcbe) basic_machine=powerpc-unknown
+ ;;
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ # This must be matched before tile*.
+ tilegx*)
+ basic_machine=tilegx-unknown
+ os=-linux-gnu
+ ;;
+ tile*)
+ basic_machine=tile-unknown
+ os=-linux-gnu
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems comes first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* | -aros* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -openbsd* | -solidbsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
+ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -aros*)
+ os=-aros
+ ;;
+ -kaos*)
+ os=-kaos
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -nacl*)
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mep-*)
+ os=-elf
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-haiku)
+ os=-haiku
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -cnk*|-aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit
+
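+# Illustrative examples (not part of the upstream script) of how the tables
+# above canonicalize short aliases when no OS component is supplied:
+#
+#   sh build/config.sub sun4     ->  sparc-sun-sunos4.1.1
+#   sh build/config.sub vax      ->  vax-dec-ultrix4.2
+#   sh build/config.sub rs6000   ->  rs6000-ibm-aix
+#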
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/build/find_apr.m4 b/build/find_apr.m4
new file mode 100644
index 0000000..925e523
--- /dev/null
+++ b/build/find_apr.m4
@@ -0,0 +1,202 @@
+dnl -------------------------------------------------------- -*- autoconf -*-
+dnl Licensed to the Apache Software Foundation (ASF) under one or more
+dnl contributor license agreements. See the NOTICE file distributed with
+dnl this work for additional information regarding copyright ownership.
+dnl The ASF licenses this file to You under the Apache License, Version 2.0
+dnl (the "License"); you may not use this file except in compliance with
+dnl the License. You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+
+dnl
+dnl find_apr.m4 : locate the APR include files and libraries
+dnl
+dnl This macro file can be used by applications to find and use the APR
+dnl library. It provides a standardized mechanism for using APR. It supports
+dnl embedding APR into the application source, or locating an installed
+dnl copy of APR.
+dnl
+dnl APR_FIND_APR(srcdir, builddir, implicit-install-check, acceptable-majors,
+dnl detailed-check)
+dnl
+dnl where srcdir is the location of the bundled APR source directory, or
+dnl empty if source is not bundled.
+dnl
+dnl  where builddir is the location where the bundled APR will be built,
+dnl or empty if the build will occur in the srcdir.
+dnl
+dnl where implicit-install-check set to 1 indicates if there is no
+dnl --with-apr option specified, we will look for installed copies.
+dnl
+dnl where acceptable-majors is a space separated list of acceptable major
+dnl version numbers. Often only a single major version will be acceptable.
+dnl If multiple versions are specified, and --with-apr=PREFIX or the
+dnl implicit installed search are used, then the first (leftmost) version
+dnl in the list that is found will be used. Currently defaults to [0 1].
+dnl
+dnl where detailed-check is an M4 macro which sets the apr_acceptable to
+dnl either "yes" or "no". The macro will be invoked for each installed
+dnl copy of APR found, with the apr_config variable set appropriately.
+dnl Only installed copies of APR which are considered acceptable by
+dnl this macro will be considered found. If no installed copies are
+dnl   considered acceptable by this macro, apr_found will be set to
+dnl either "no" or "reconfig".
+dnl
+dnl Sets the following variables on exit:
+dnl
+dnl apr_found : "yes", "no", "reconfig"
+dnl
+dnl apr_config : If the apr-config tool exists, this refers to it. If
+dnl apr_found is "reconfig", then the bundled directory
+dnl should be reconfigured *before* using apr_config.
+dnl
+dnl Note: this macro file assumes that apr-config has been installed; it
+dnl is normally considered a required part of an APR installation.
+dnl
+dnl If a bundled source directory is available and needs to be (re)configured,
+dnl then apr_found is set to "reconfig". The caller should reconfigure the
+dnl (passed-in) source directory, placing the result in the build directory,
+dnl as appropriate.
+dnl
+dnl If apr_found is "yes" or "reconfig", then the caller should use the
+dnl value of apr_config to fetch any necessary build/link information.
+dnl
+
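+dnl Illustrative usage (not part of the upstream macro; the error message and
+dnl the APR_INCLUDES variable are hypothetical): a configure.in with no
+dnl bundled APR source, preferring APR 1.x over 0.x, might call:
+dnl
+dnl   APR_FIND_APR([], [], [1], [1 0])
+dnl   if test "$apr_found" = "no"; then
+dnl     AC_MSG_ERROR([APR is required; use --with-apr])
+dnl   fi
+dnl   APR_INCLUDES="`$apr_config --includes`"
+dnl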
+AC_DEFUN([APR_FIND_APR], [
+ apr_found="no"
+
+ if test "$target_os" = "os2-emx"; then
+ # Scripts don't pass test -x on OS/2
+ TEST_X="test -f"
+ else
+ TEST_X="test -x"
+ fi
+
+ ifelse([$4], [], [
+ ifdef(AC_WARNING,AC_WARNING([$0: missing argument 4 (acceptable-majors): Defaulting to APR 0.x then APR 1.x]))
+ acceptable_majors="0 1"],
+ [acceptable_majors="$4"])
+
+ apr_temp_acceptable_apr_config=""
+ for apr_temp_major in $acceptable_majors
+ do
+ case $apr_temp_major in
+ 0)
+ apr_temp_acceptable_apr_config="$apr_temp_acceptable_apr_config apr-config"
+ ;;
+ *)
+ apr_temp_acceptable_apr_config="$apr_temp_acceptable_apr_config apr-$apr_temp_major-config"
+ ;;
+ esac
+ done
+
+ AC_MSG_CHECKING(for APR)
+ AC_ARG_WITH(apr,
+ [ --with-apr=PATH prefix for installed APR or the full path to
+ apr-config],
+ [
+ if test "$withval" = "no" || test "$withval" = "yes"; then
+ AC_MSG_ERROR([--with-apr requires a directory or file to be provided])
+ fi
+
+ for apr_temp_apr_config_file in $apr_temp_acceptable_apr_config
+ do
+ for lookdir in "$withval/bin" "$withval"
+ do
+ if $TEST_X "$lookdir/$apr_temp_apr_config_file"; then
+ apr_config="$lookdir/$apr_temp_apr_config_file"
+ ifelse([$5], [], [], [
+ apr_acceptable="yes"
+ $5
+ if test "$apr_acceptable" != "yes"; then
+            AC_MSG_WARN([Found APR in $apr_config, but it is considered unacceptable])
+ continue
+ fi])
+ apr_found="yes"
+ break 2
+ fi
+ done
+ done
+
+ if test "$apr_found" != "yes" && $TEST_X "$withval" && $withval --help > /dev/null 2>&1 ; then
+ apr_config="$withval"
+ ifelse([$5], [], [apr_found="yes"], [
+ apr_acceptable="yes"
+ $5
+ if test "$apr_acceptable" = "yes"; then
+ apr_found="yes"
+ fi])
+ fi
+
+ dnl if --with-apr is used, it is a fatal error for its argument
+ dnl to be invalid
+ if test "$apr_found" != "yes"; then
+ AC_MSG_ERROR([the --with-apr parameter is incorrect. It must specify an install prefix, a build directory, or an apr-config file.])
+ fi
+ ],[
+ dnl If we allow installed copies, check those before using bundled copy.
+ if test -n "$3" && test "$3" = "1"; then
+ for apr_temp_apr_config_file in $apr_temp_acceptable_apr_config
+ do
+ if $apr_temp_apr_config_file --help > /dev/null 2>&1 ; then
+ apr_config="$apr_temp_apr_config_file"
+ ifelse([$5], [], [], [
+ apr_acceptable="yes"
+ $5
+ if test "$apr_acceptable" != "yes"; then
+ AC_MSG_WARN([skipped APR at $apr_config, version not acceptable])
+ continue
+ fi])
+ apr_found="yes"
+ break
+ else
+ dnl look in some standard places
+ for lookdir in /usr /usr/local /usr/local/apr /opt/apr; do
+ if $TEST_X "$lookdir/bin/$apr_temp_apr_config_file"; then
+ apr_config="$lookdir/bin/$apr_temp_apr_config_file"
+ ifelse([$5], [], [], [
+ apr_acceptable="yes"
+ $5
+ if test "$apr_acceptable" != "yes"; then
+ AC_MSG_WARN([skipped APR at $apr_config, version not acceptable])
+ continue
+ fi])
+ apr_found="yes"
+ break 2
+ fi
+ done
+ fi
+ done
+ fi
+ dnl if we have not found anything yet and have bundled source, use that
+ if test "$apr_found" = "no" && test -d "$1"; then
+ apr_temp_abs_srcdir="`cd \"$1\" && pwd`"
+ apr_found="reconfig"
+ apr_bundled_major="`sed -n '/#define.*APR_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p' \"$1/include/apr_version.h\"`"
+ case $apr_bundled_major in
+ "")
+ AC_MSG_ERROR([failed to find major version of bundled APR])
+ ;;
+ 0)
+ apr_temp_apr_config_file="apr-config"
+ ;;
+ *)
+ apr_temp_apr_config_file="apr-$apr_bundled_major-config"
+ ;;
+ esac
+ if test -n "$2"; then
+ apr_config="$2/$apr_temp_apr_config_file"
+ else
+ apr_config="$1/$apr_temp_apr_config_file"
+ fi
+ fi
+ ])
+
+ AC_MSG_RESULT($apr_found)
+])
diff --git a/build/find_apu.m4 b/build/find_apu.m4
new file mode 100644
index 0000000..7937e00
--- /dev/null
+++ b/build/find_apu.m4
@@ -0,0 +1,211 @@
+dnl -------------------------------------------------------- -*- autoconf -*-
+dnl Licensed to the Apache Software Foundation (ASF) under one or more
+dnl contributor license agreements. See the NOTICE file distributed with
+dnl this work for additional information regarding copyright ownership.
+dnl The ASF licenses this file to You under the Apache License, Version 2.0
+dnl (the "License"); you may not use this file except in compliance with
+dnl the License. You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+
+dnl
+dnl find_apu.m4 : locate the APR-util (APU) include files and libraries
+dnl
+dnl This macro file can be used by applications to find and use the APU
+dnl library. It provides a standardized mechanism for using APU. It supports
+dnl embedding APU into the application source, or locating an installed
+dnl copy of APU.
+dnl
+dnl APR_FIND_APU(srcdir, builddir, implicit-install-check, acceptable-majors,
+dnl detailed-check)
+dnl
+dnl where srcdir is the location of the bundled APU source directory, or
+dnl empty if source is not bundled.
+dnl
+dnl where builddir is the location where the bundled APU will be built,
+dnl or empty if the build will occur in the srcdir.
+dnl
+dnl where implicit-install-check set to 1 indicates if there is no
+dnl --with-apr-util option specified, we will look for installed copies.
+dnl
+dnl where acceptable-majors is a space separated list of acceptable major
+dnl version numbers. Often only a single major version will be acceptable.
+dnl If multiple versions are specified, and --with-apr-util=PREFIX or the
+dnl implicit installed search are used, then the first (leftmost) version
+dnl in the list that is found will be used. Currently defaults to [0 1].
+dnl
+dnl where detailed-check is an M4 macro which sets the apu_acceptable to
+dnl either "yes" or "no". The macro will be invoked for each installed
+dnl copy of APU found, with the apu_config variable set appropriately.
+dnl Only installed copies of APU which are considered acceptable by
+dnl this macro will be considered found. If no installed copies are
+dnl   considered acceptable by this macro, apu_found will be set to
+dnl either "no" or "reconfig".
+dnl
+dnl Sets the following variables on exit:
+dnl
+dnl apu_found : "yes", "no", "reconfig"
+dnl
+dnl apu_config : If the apu-config tool exists, this refers to it. If
+dnl apu_found is "reconfig", then the bundled directory
+dnl should be reconfigured *before* using apu_config.
+dnl
+dnl Note: this macro file assumes that apr-config has been installed; it
+dnl is normally considered a required part of an APR installation.
+dnl
+dnl Note: At this time, we cannot find *both* a source dir and a build dir.
+dnl If both are available, the build directory should be passed to
+dnl the --with-apr-util switch.
+dnl
+dnl Note: the installation layout is presumed to follow the standard
+dnl PREFIX/lib and PREFIX/include pattern. If the APU config file
+dnl is available (and can be found), then non-standard layouts are
+dnl possible, since it will be described in the config file.
+dnl
+dnl If a bundled source directory is available and needs to be (re)configured,
+dnl then apu_found is set to "reconfig". The caller should reconfigure the
+dnl (passed-in) source directory, placing the result in the build directory,
+dnl as appropriate.
+dnl
+dnl If apu_found is "yes" or "reconfig", then the caller should use the
+dnl value of apu_config to fetch any necessary build/link information.
+dnl
+
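+dnl Illustrative usage (not part of the upstream macro), mirroring the
+dnl find_apr.m4 sketch above; the error message is hypothetical:
+dnl
+dnl   APR_FIND_APU([], [], [1], [1 0])
+dnl   if test "$apu_found" = "no"; then
+dnl     AC_MSG_ERROR([APR-util is required; use --with-apr-util])
+dnl   fi
+dnl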
+AC_DEFUN([APR_FIND_APU], [
+ apu_found="no"
+
+ if test "$target_os" = "os2-emx"; then
+ # Scripts don't pass test -x on OS/2
+ TEST_X="test -f"
+ else
+ TEST_X="test -x"
+ fi
+
+ ifelse([$4], [],
+ [
+    ifdef(AC_WARNING,AC_WARNING([$0: missing argument 4 (acceptable-majors): Defaulting to APU 0.x then APU 1.x]))
+ acceptable_majors="0 1"
+ ], [acceptable_majors="$4"])
+
+ apu_temp_acceptable_apu_config=""
+ for apu_temp_major in $acceptable_majors
+ do
+ case $apu_temp_major in
+ 0)
+ apu_temp_acceptable_apu_config="$apu_temp_acceptable_apu_config apu-config"
+ ;;
+ *)
+ apu_temp_acceptable_apu_config="$apu_temp_acceptable_apu_config apu-$apu_temp_major-config"
+ ;;
+ esac
+ done
+
+ AC_MSG_CHECKING(for APR-util)
+ AC_ARG_WITH(apr-util,
+ [ --with-apr-util=PATH prefix for installed APU or the full path to
+ apu-config],
+ [
+ if test "$withval" = "no" || test "$withval" = "yes"; then
+ AC_MSG_ERROR([--with-apr-util requires a directory or file to be provided])
+ fi
+
+ for apu_temp_apu_config_file in $apu_temp_acceptable_apu_config
+ do
+ for lookdir in "$withval/bin" "$withval"
+ do
+ if $TEST_X "$lookdir/$apu_temp_apu_config_file"; then
+ apu_config="$lookdir/$apu_temp_apu_config_file"
+ ifelse([$5], [], [], [
+ apu_acceptable="yes"
+ $5
+ if test "$apu_acceptable" != "yes"; then
+ AC_MSG_WARN([Found APU in $apu_config, but it is considered unacceptable])
+ continue
+ fi])
+ apu_found="yes"
+ break 2
+ fi
+ done
+ done
+
+ if test "$apu_found" != "yes" && $TEST_X "$withval" && $withval --help > /dev/null 2>&1 ; then
+ apu_config="$withval"
+ ifelse([$5], [], [apu_found="yes"], [
+ apu_acceptable="yes"
+ $5
+ if test "$apu_acceptable" = "yes"; then
+ apu_found="yes"
+ fi])
+ fi
+
+ dnl if --with-apr-util is used, it is a fatal error for its argument
+ dnl to be invalid
+ if test "$apu_found" != "yes"; then
+ AC_MSG_ERROR([the --with-apr-util parameter is incorrect. It must specify an install prefix, a build directory, or an apu-config file.])
+ fi
+ ],[
+ if test -n "$3" && test "$3" = "1"; then
+ for apu_temp_apu_config_file in $apu_temp_acceptable_apu_config
+ do
+ if $apu_temp_apu_config_file --help > /dev/null 2>&1 ; then
+ apu_config="$apu_temp_apu_config_file"
+ ifelse([$5], [], [], [
+ apu_acceptable="yes"
+ $5
+ if test "$apu_acceptable" != "yes"; then
+ AC_MSG_WARN([skipped APR-util at $apu_config, version not acceptable])
+ continue
+ fi])
+ apu_found="yes"
+ break
+ else
+ dnl look in some standard places (apparently not in builtin/default)
+ for lookdir in /usr /usr/local /usr/local/apr /opt/apr; do
+ if $TEST_X "$lookdir/bin/$apu_temp_apu_config_file"; then
+ apu_config="$lookdir/bin/$apu_temp_apu_config_file"
+ ifelse([$5], [], [], [
+ apu_acceptable="yes"
+ $5
+ if test "$apu_acceptable" != "yes"; then
+ AC_MSG_WARN([skipped APR-util at $apu_config, version not acceptable])
+ continue
+ fi])
+ apu_found="yes"
+ break 2
+ fi
+ done
+ fi
+ done
+ fi
+ dnl if we have not found anything yet and have bundled source, use that
+ if test "$apu_found" = "no" && test -d "$1"; then
+ apu_temp_abs_srcdir="`cd \"$1\" && pwd`"
+ apu_found="reconfig"
+ apu_bundled_major="`sed -n '/#define.*APU_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p' \"$1/include/apu_version.h\"`"
+ case $apu_bundled_major in
+ "")
+ AC_MSG_ERROR([failed to find major version of bundled APU])
+ ;;
+ 0)
+ apu_temp_apu_config_file="apu-config"
+ ;;
+ *)
+ apu_temp_apu_config_file="apu-$apu_bundled_major-config"
+ ;;
+ esac
+ if test -n "$2"; then
+ apu_config="$2/$apu_temp_apu_config_file"
+ else
+ apu_config="$1/$apu_temp_apu_config_file"
+ fi
+ fi
+ ])
+
+ AC_MSG_RESULT($apu_found)
+])
diff --git a/build/gen_def.py b/build/gen_def.py
new file mode 100755
index 0000000..c76b9a4
--- /dev/null
+++ b/build/gen_def.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+#
+# gen_def.py : Generate the .DEF file for Windows builds
+#
+# ====================================================================
+# Copyright 2002-2010 Justin Erenkrantz and Greg Stein
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ====================================================================
+#
+#
+# Typically, this script is used like:
+#
+# C:\PATH> python build/gen_def.py serf.h serf_bucket_types.h serf_bucket_util.h > build/serf.def
+#
+
+import re
+import sys
+
+# This regex parses function declarations that look like:
+#
+# return_type serf_func1(...
+# return_type *serf_func2(...
+#
+# Where return_type is a combination of words and "*" each separated by a
+# SINGLE space. If the function returns a pointer type (like serf_func2),
+# then a space may exist between the "*" and the function name. Thus,
+# a more complicated example might be:
+# const type * const * serf_func3(...
+#
+_funcs = re.compile(r'^(?:(?:\w+|\*) )+\*?(serf_[a-z][a-z_0-9]*)\(',
+ re.MULTILINE)
+
+# This regex parses the bucket type definitions which look like:
+#
+# extern const serf_bucket_type_t serf_bucket_type_FOO;
+#
+_types = re.compile(r'^extern const serf_bucket_type_t (serf_[a-z_]*);',
+ re.MULTILINE)
+
+
+def extract_exports(fname):
+ content = open(fname).read()
+ exports = [ ]
+ for name in _funcs.findall(content):
+ exports.append(name)
+ for name in _types.findall(content):
+ exports.append(name)
+ return exports
+
+
+if __name__ == '__main__':
+ # run the extraction over each file mentioned
+ print("EXPORTS")
+ for fname in sys.argv[1:]:
+ for func in extract_exports(fname):
+ print(func)
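+
+# Illustrative only (not part of the upstream script): given a header with
+# declarations such as
+#
+#   serf_context_t *serf_context_create(apr_pool_t *pool);
+#   extern const serf_bucket_type_t serf_bucket_type_socket;
+#
+# the two regexes above pick out the exported names, so running the script
+# on that header would print:
+#
+#   EXPORTS
+#   serf_context_create
+#   serf_bucket_type_socket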
diff --git a/build/get-version.sh b/build/get-version.sh
new file mode 100755
index 0000000..fd685b2
--- /dev/null
+++ b/build/get-version.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# extract version numbers from a header file
+#
+# USAGE: get-version.sh CMD VERSION_HEADER PREFIX
+# where CMD is one of: all, major, libtool
+# where PREFIX is the prefix to {MAJOR|MINOR|PATCH}_VERSION defines
+#
+# get-version.sh all returns a dotted version number
+# get-version.sh major returns just the major version number
+# get-version.sh libtool returns a version in "libtool -version-info" format
+#
+
+if test $# != 3; then
+ echo "USAGE: $0 CMD VERSION_HEADER PREFIX"
+ echo " where CMD is one of: all, major, libtool"
+ exit 1
+fi
+
+major_sed="/#define.*$3_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p"
+minor_sed="/#define.*$3_MINOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p"
+patch_sed="/#define.*$3_PATCH_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p"
+major="`sed -n $major_sed $2`"
+minor="`sed -n $minor_sed $2`"
+patch="`sed -n $patch_sed $2`"
+
+if test "$1" = "all"; then
+ echo ${major}.${minor}.${patch}
+elif test "$1" = "major"; then
+ echo ${major}
+elif test "$1" = "libtool"; then
+ # Yes, ${minor}:${patch}:${minor} is correct due to libtool idiocy.
+ echo ${minor}:${patch}:${minor}
+else
+ echo "ERROR: unknown version CMD ($1)"
+ exit 1
+fi
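+
+# Illustrative only (hypothetical numbers): if the header defines
+# SERF_MAJOR_VERSION 1, SERF_MINOR_VERSION 1 and SERF_PATCH_VERSION 0, then
+#
+#   build/get-version.sh all     serf.h SERF  ->  1.1.0
+#   build/get-version.sh major   serf.h SERF  ->  1
+#   build/get-version.sh libtool serf.h SERF  ->  1:0:1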
diff --git a/build/install.sh b/build/install.sh
new file mode 100755
index 0000000..9a8821f
--- /dev/null
+++ b/build/install.sh
@@ -0,0 +1,112 @@
+#!/bin/sh
+##
+## install.sh -- install a program, script or datafile
+##
+## Based on `install-sh' from the X Consortium's X11R5 distribution
+## as of 89/12/18 which is freely available.
+## Cleaned up for Apache's Autoconf-style Interface (APACI)
+## by Ralf S. Engelschall <rse@apache.org>
+##
+#
+# This script falls under the Apache License.
+# See http://www.apache.org/docs/LICENSE
+
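+# Illustrative usage (hypothetical paths, not part of the original script):
+#
+#   build/install.sh -c -m 644 serf.h /usr/local/serf/include/serf.h
+#
+# copies (-c) rather than moves, installs under a temporary name, applies
+# chmod 644, then renames the result into place.
+#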
+
+#
+# put in absolute paths if you don't have them in your path;
+# or use env. vars.
+#
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+
+#
+# parse argument line
+#
+instcmd="$mvprog"
+chmodcmd=""
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+ext=""
+src=""
+dst=""
+while [ "x$1" != "x" ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift; continue
+ ;;
+ -m) chmodcmd="$chmodprog $2"
+ shift; shift; continue
+ ;;
+ -o) chowncmd="$chownprog $2"
+ shift; shift; continue
+ ;;
+ -g) chgrpcmd="$chgrpprog $2"
+ shift; shift; continue
+ ;;
+ -s) stripcmd="$stripprog"
+ shift; continue
+ ;;
+ -S) stripcmd="$stripprog $2"
+ shift; shift; continue
+ ;;
+ -e) ext="$2"
+ shift; shift; continue
+ ;;
+ *) if [ "x$src" = "x" ]; then
+ src=$1
+ else
+ dst=$1
+ fi
+ shift; continue
+ ;;
+ esac
+done
+if [ "x$src" = "x" ]; then
+ echo "install.sh: no input file specified"
+ exit 1
+fi
+if [ "x$dst" = "x" ]; then
+ echo "install.sh: no destination specified"
+ exit 1
+fi
+
+#
+# If destination is a directory, append the input filename; if
+# your system does not like double slashes in filenames, you may
+# need to add some logic
+#
+if [ -d $dst ]; then
+ dst="$dst/`basename $src`"
+fi
+
+# Add a possible extension (such as ".exe") to src and dst
+src="$src$ext"
+dst="$dst$ext"
+
+# Make a temp file name in the proper directory.
+dstdir=`dirname $dst`
+dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+$instcmd $src $dsttmp
+
+# And set any options; do chmod last to preserve setuid bits
+if [ "x$chowncmd" != "x" ]; then $chowncmd $dsttmp; fi
+if [ "x$chgrpcmd" != "x" ]; then $chgrpcmd $dsttmp; fi
+if [ "x$stripcmd" != "x" ]; then $stripcmd $dsttmp; fi
+if [ "x$chmodcmd" != "x" ]; then $chmodcmd $dsttmp; fi
+
+# Now rename the file to the real destination.
+$rmcmd $dst
+$mvcmd $dsttmp $dst
+
+exit 0
+
diff --git a/build/serf.def b/build/serf.def
new file mode 100644
index 0000000..3aa46c2
--- /dev/null
+++ b/build/serf.def
@@ -0,0 +1,136 @@
+EXPORTS
+serf_error_string
+serf_context_create
+serf_context_create_ex
+serf_event_trigger
+serf_context_run
+serf_context_prerun
+serf_context_set_progress_cb
+serf_connection_create
+serf_connection_create2
+serf_listener_create
+serf_incoming_create
+serf_connection_reset
+serf_connection_close
+serf_connection_set_max_outstanding_requests
+serf_connection_set_async_responses
+serf_connection_request_create
+serf_connection_priority_request_create
+serf_request_cancel
+serf_request_get_pool
+serf_request_get_alloc
+serf_request_get_conn
+serf_request_set_handler
+serf_config_proxy
+serf_config_authn_types
+serf_config_credentials_callback
+serf_context_bucket_socket_create
+serf_request_bucket_request_create
+serf_bucket_allocator_create
+serf_bucket_allocator_get_pool
+serf_linebuf_init
+serf_linebuf_fetch
+serf_debug__record_read
+serf_debug__entered_loop
+serf_debug__closed_conn
+serf_debug__bucket_destroy
+serf_debug__bucket_alloc_check
+serf_lib_version
+serf_bucket_request_create
+serf_bucket_request_get_headers
+serf_bucket_request_become
+serf_bucket_request_set_root
+serf_bucket_response_create
+serf_bucket_response_status
+serf_bucket_response_wait_for_headers
+serf_bucket_response_get_headers
+serf_bucket_response_set_head
+serf_bucket_bwtp_frame_get_channel
+serf_bucket_bwtp_frame_get_type
+serf_bucket_bwtp_frame_get_phrase
+serf_bucket_bwtp_frame_get_headers
+serf_bucket_bwtp_channel_open
+serf_bucket_bwtp_channel_close
+serf_bucket_bwtp_header_create
+serf_bucket_bwtp_message_create
+serf_bucket_bwtp_incoming_frame_create
+serf_bucket_bwtp_incoming_frame_wait_for_headers
+serf_bucket_aggregate_cleanup
+serf_bucket_aggregate_create
+serf_bucket_aggregate_become
+serf_bucket_aggregate_prepend
+serf_bucket_aggregate_append
+serf_bucket_aggregate_hold_open
+serf_bucket_aggregate_prepend_iovec
+serf_bucket_aggregate_append_iovec
+serf_bucket_file_create
+serf_bucket_socket_create
+serf_bucket_socket_set_read_progress_cb
+serf_bucket_simple_create
+serf_bucket_simple_copy_create
+serf_bucket_mmap_create
+serf_bucket_headers_create
+serf_bucket_headers_set
+serf_bucket_headers_setc
+serf_bucket_headers_setn
+serf_bucket_headers_setx
+serf_bucket_headers_get
+serf_bucket_headers_do
+serf_bucket_chunk_create
+serf_bucket_dechunk_create
+serf_bucket_deflate_create
+serf_bucket_limit_create
+serf_ssl_client_cert_provider_set
+serf_ssl_client_cert_password_set
+serf_ssl_server_cert_callback_set
+serf_ssl_server_cert_chain_callback_set
+serf_ssl_use_default_certificates
+serf_ssl_set_hostname
+serf_ssl_cert_depth
+serf_ssl_cert_issuer
+serf_ssl_cert_subject
+serf_ssl_cert_certificate
+serf_ssl_cert_export
+serf_ssl_load_cert_file
+serf_ssl_trust_cert
+serf_bucket_ssl_encrypt_create
+serf_bucket_ssl_encrypt_context_get
+serf_bucket_ssl_decrypt_create
+serf_bucket_ssl_decrypt_context_get
+serf_bucket_barrier_create
+serf_bucket_iovec_create
+serf_bucket_type_request
+serf_bucket_type_response
+serf_bucket_type_bwtp_frame
+serf_bucket_type_bwtp_incoming_frame
+serf_bucket_type_aggregate
+serf_bucket_type_file
+serf_bucket_type_socket
+serf_bucket_type_simple
+serf_bucket_type_mmap
+serf_bucket_type_headers
+serf_bucket_type_chunk
+serf_bucket_type_dechunk
+serf_bucket_type_deflate
+serf_bucket_type_limit
+serf_bucket_type_ssl_encrypt
+serf_bucket_type_ssl_decrypt
+serf_bucket_type_barrier
+serf_bucket_type_iovec
+serf_bucket_create
+serf_default_read_iovec
+serf_default_read_for_sendfile
+serf_default_read_bucket
+serf_default_destroy
+serf_default_destroy_and_data
+serf_bucket_mem_alloc
+serf_bucket_mem_calloc
+serf_bucket_mem_free
+serf_bstrmemdup
+serf_bmemdup
+serf_bstrdup
+serf_util_readline
+serf_databuf_init
+serf_databuf_read
+serf_databuf_readline
+serf_databuf_peek
diff --git a/buildconf b/buildconf
new file mode 100755
index 0000000..037cdbc
--- /dev/null
+++ b/buildconf
@@ -0,0 +1,119 @@
+#!/bin/sh
+#
+# Copyright 2005 Justin Erenkrantz and Greg Stein
+# Copyright 2005 The Apache Software Foundation or its licensors, as
+# applicable.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# buildconf: Build the support scripts needed to compile from a
+# checked-out version of the source code.
+
+# set a couple of defaults for where we should be looking for our support libs.
+# can be overridden with --with-apr=[dir] and --with-apr-util=[dir]
+
+apr_src_dir="apr ../apr"
+apu_src_dir="apr-util ../apr-util"
+
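+# Illustrative invocation (hypothetical paths): when APR and APR-util are
+# checked out next to (or inside) the serf tree, the defaults above are found
+# automatically; otherwise point the script at the sources explicitly, e.g.
+#
+#   ./buildconf --with-apr=/path/to/apr --with-apr-util=/path/to/apr-util
+#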
+while test $# -gt 0
+do
+ # Normalize
+ case "$1" in
+ -*=*) optarg=`echo "$1" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
+ *) optarg= ;;
+ esac
+
+ case "$1" in
+ --with-apr=*)
+ apr_src_dir=$optarg
+ ;;
+ esac
+
+ case "$1" in
+ --with-apr-util=*)
+ apu_src_dir=$optarg
+ ;;
+ esac
+
+ shift
+done
+
+#
+# Check to be sure that we have the srclib dependencies checked-out
+#
+
+should_exit=0
+apr_found=0
+apu_found=0
+
+for dir in $apr_src_dir
+do
+ if [ -d "${dir}" -a -f "${dir}/build/apr_common.m4" ]; then
+ echo "found apr source: ${dir}"
+ apr_src_dir=$dir
+ apr_found=1
+ break
+ fi
+done
+
+if [ $apr_found -lt 1 ]; then
+ echo ""
+ echo "You don't have a copy of the apr source in srclib/apr. "
+ echo "Please get the source using the following instructions,"
+ echo "or specify the location of the source with "
+ echo "--with-apr=[path to apr] :"
+ echo ""
+ echo " svn co http://svn.apache.org/repos/asf/apr/apr/trunk srclib/apr"
+ echo ""
+ should_exit=1
+fi
+
+for dir in $apu_src_dir
+do
+ if [ -d "${dir}" -a -f "${dir}/Makefile.in" ]; then
+ echo "found apr-util source: ${dir}"
+ apu_src_dir=$dir
+ apu_found=1
+ break
+ fi
+done
+
+if [ $apu_found -lt 1 ]; then
+ echo ""
+ echo "APR-util not found. Assuming you are using APR 2.x."
+ echo ""
+ apu_src_dir=
+fi
+
+if [ $should_exit -gt 0 ]; then
+ exit 1
+fi
+
+echo copying build files
+cp $apr_src_dir/build/config.guess $apr_src_dir/build/config.sub \
+ $apr_src_dir/build/install.sh $apr_src_dir/build/apr_common.m4 \
+ $apr_src_dir/build/find_apr.m4 $apr_src_dir/build/get-version.sh build
+
+if [ -n "$apu_src_dir" -a -d "$apu_src_dir" ] ; then
+ cp $apu_src_dir/build/find_apu.m4 build
+fi
+
+echo generating configure
+${AUTOCONF:-autoconf}
+
+# Remove autoconf 2.5x's cache directory
+rm -rf autom4te*.cache
+
+echo generating serf.def
+./build/gen_def.py serf.h serf_bucket_*.h > build/serf.def
diff --git a/config.layout b/config.layout
new file mode 100644
index 0000000..a27e081
--- /dev/null
+++ b/config.layout
@@ -0,0 +1,26 @@
+##
+## config.layout -- Pre-defined Installation Path Layouts
+##
+## Hints:
+## - layouts can be loaded with configure's --enable-layout=ID option
+## - when no --enable-layout option is given, the default layout is `serf'
+## - a trailing plus character (`+') on paths is replaced with a
+## `/<target>' suffix where <target> is currently hardcoded to 'serf'.
+## (This may become a configurable parameter at some point.)
+##
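+##
+## Example (illustrative, not part of the original file): to select the
+## layout defined below explicitly, run
+##
+##   ./configure --enable-layout=Serf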
+
+<Layout Serf>
+ prefix: /usr/local/serf
+ exec_prefix: ${prefix}
+ bindir: ${exec_prefix}/bin
+ sbindir: ${exec_prefix}/bin
+ libdir: ${exec_prefix}/lib
+ libexecdir: ${exec_prefix}/modules
+ mandir: ${prefix}/man
+ sysconfdir: ${prefix}/conf
+ datadir: ${prefix}
+ installbuilddir: ${datadir}/build-${SERF_MAJOR_VERSION}
+ includedir: ${prefix}/include/serf-${SERF_MAJOR_VERSION}
+ localstatedir: ${prefix}
+ libsuffix: -${SERF_MAJOR_VERSION}
+</Layout>
diff --git a/configure b/configure
new file mode 100755
index 0000000..4e45fdf
--- /dev/null
+++ b/configure
@@ -0,0 +1,6257 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.61.
+#
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+as_nl='
+'
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+if test "x$CONFIG_SHELL" = x; then
+ if (eval ":") 2>/dev/null; then
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+
+ if test $as_have_required = yes && (eval ":
+(as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=\$LINENO
+ as_lineno_2=\$LINENO
+ test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" &&
+ test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; }
+") 2> /dev/null; then
+ :
+else
+ as_candidate_shells=
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ case $as_dir in
+ /*)
+ for as_base in sh bash ksh sh5; do
+ as_candidate_shells="$as_candidate_shells $as_dir/$as_base"
+ done;;
+ esac
+done
+IFS=$as_save_IFS
+
+
+ for as_shell in $as_candidate_shells $SHELL; do
+ # Try only shells that exist, to save several forks.
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { ("$as_shell") 2> /dev/null <<\_ASEOF
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+:
+_ASEOF
+}; then
+ CONFIG_SHELL=$as_shell
+ as_have_required=yes
+ if { "$as_shell" 2> /dev/null <<\_ASEOF
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+:
+(as_func_return () {
+ (exit $1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = "$1" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test $exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; }
+
+_ASEOF
+}; then
+ break
+fi
+
+fi
+
+ done
+
+ if test "x$CONFIG_SHELL" != x; then
+ for as_var in BASH_ENV ENV
+ do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ done
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+fi
+
+
+ if test $as_have_required = no; then
+ echo This script requires a shell more modern than all the
+ echo shells that I found on your system. Please install a
+ echo modern shell, or manually run the script under such a
+ echo shell if you do have one.
+ { (exit 1); exit 1; }
+fi
+
+
+fi
+
+fi
+
+
+
+(eval "as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0") || {
+ echo No shell found that supports shell functions.
+ echo Please tell autoconf@gnu.org about your system,
+ echo including any error possibly output before this
+ echo message
+}
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir
+fi
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+
+exec 7<&0 </dev/null 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+# Identity of this package.
+PACKAGE_NAME=
+PACKAGE_TARNAME=
+PACKAGE_VERSION=
+PACKAGE_STRING=
+PACKAGE_BUGREPORT=
+
+ac_unique_file="context.c"
+ac_default_prefix=/usr/local/serf
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='SHELL
+PATH_SEPARATOR
+PACKAGE_NAME
+PACKAGE_TARNAME
+PACKAGE_VERSION
+PACKAGE_STRING
+PACKAGE_BUGREPORT
+exec_prefix
+prefix
+program_transform_name
+bindir
+sbindir
+libexecdir
+datarootdir
+datadir
+sysconfdir
+sharedstatedir
+localstatedir
+includedir
+oldincludedir
+docdir
+infodir
+htmldir
+dvidir
+pdfdir
+psdir
+libdir
+localedir
+mandir
+DEFS
+ECHO_C
+ECHO_N
+ECHO_T
+LIBS
+build_alias
+host_alias
+target_alias
+mkdir_p
+build
+build_cpu
+build_vendor
+build_os
+host
+host_cpu
+host_vendor
+host_os
+target
+target_cpu
+target_vendor
+target_os
+LTFLAGS
+APR_LIBTOOL
+APR_BINDIR
+APR_INCLUDES
+APR_VERSION
+APR_CONFIG
+APU_BINDIR
+APU_INCLUDES
+APU_VERSION
+APU_CONFIG
+CC
+CFLAGS
+LDFLAGS
+CPPFLAGS
+ac_ct_CC
+EXEEXT
+OBJEXT
+CPP
+INSTALL_PROGRAM
+INSTALL_SCRIPT
+INSTALL_DATA
+SERF_MAJOR_VERSION
+SERF_DOTTED_VERSION
+SERF_BUILD_SRCLIB_DIRS
+SERF_CLEAN_SRCLIB_DIRS
+GREP
+EGREP
+SERF_LIBS
+EXTRA_CPPFLAGS
+EXTRA_CFLAGS
+EXTRA_CXXFLAGS
+EXTRA_LDFLAGS
+EXTRA_LIBS
+EXTRA_INCLUDES
+LIBOBJS
+LTLIBOBJS'
+ac_subst_files=''
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'`
+ eval enable_$ac_feature=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'`
+ eval enable_$ac_feature=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package | sed 's/[-.]/_/g'`
+ eval with_$ac_package=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package | sed 's/[-.]/_/g'`
+ eval with_$ac_package=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) { echo "$as_me: error: unrecognized option: $ac_option
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; }
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
+ { (exit 1); exit 1; }; }
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ { echo "$as_me: error: missing argument to $ac_option" >&2
+ { (exit 1); exit 1; }; }
+fi
+
+# Be sure to have absolute directory names.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir
+do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; }
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used." >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ { echo "$as_me: error: Working directory cannot be determined" >&2
+ { (exit 1); exit 1; }; }
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ { echo "$as_me: error: pwd does not report name of working directory" >&2
+ { (exit 1); exit 1; }; }
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$0" ||
+$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$0" : 'X\(//\)[^/]' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$0" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
+ { (exit 1); exit 1; }; }
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2
+ { (exit 1); exit 1; }; }
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures this package to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+System types:
+ --build=BUILD configure for building on BUILD [guessed]
+ --host=HOST cross-compile to build programs to run on HOST [BUILD]
+ --target=TARGET configure for building compilers for TARGET [HOST]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-layout=LAYOUT
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-apr=PATH prefix for installed APR or the full path to
+ apr-config
+ --with-apr-util=PATH prefix for installed APU or the full path to
+ apu-config
+ --with-openssl=PATH Path to OpenSSL (eg. /usr/local/ssl)
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CPP C preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" || continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for guested configure.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+configure
+generated by GNU Autoconf 2.61
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by $as_me, which was
+generated by GNU Autoconf 2.61. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ echo "PATH: $as_dir"
+done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 2)
+ ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ ac_configure_args="$ac_configure_args '$ac_arg'"
+ ;;
+ esac
+ done
+done
+$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
+$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
+echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ cat <<\_ASBOX
+## ------------------- ##
+## File substitutions. ##
+## ------------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ echo "$as_me: caught signal $ac_signal"
+ echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer explicitly selected file to automatically selected ones.
+if test -n "$CONFIG_SITE"; then
+ set x "$CONFIG_SITE"
+elif test "x$prefix" != xNONE; then
+ set x "$prefix/share/config.site" "$prefix/etc/config.site"
+else
+ set x "$ac_default_prefix/share/config.site" \
+ "$ac_default_prefix/etc/config.site"
+fi
+shift
+for ac_site_file
+do
+ if test -r "$ac_site_file"; then
+ { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
+echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { echo "$as_me:$LINENO: loading cache $cache_file" >&5
+echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { echo "$as_me:$LINENO: creating cache $cache_file" >&5
+echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
+echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ { echo "$as_me:$LINENO: former value: $ac_old_val" >&5
+echo "$as_me: former value: $ac_old_val" >&2;}
+ { echo "$as_me:$LINENO: current value: $ac_new_val" >&5
+echo "$as_me: current value: $ac_new_val" >&2;}
+ ac_cache_corrupted=:
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
+echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
+echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+ac_aux_dir=
+for ac_dir in build "$srcdir"/build; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in build \"$srcdir\"/build" >&5
+echo "$as_me: error: cannot find install-sh or install.sh in build \"$srcdir\"/build" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --enable-layout was given.
+if test "${enable_layout+set}" = set; then
+ enableval=$enable_layout;
+ LAYOUT=$enableval
+
+fi
+
+
+if test -z "$LAYOUT"; then
+ LAYOUT="Serf"
+fi
+
+ if test ! -f $srcdir/config.layout; then
+ echo "** Error: Layout file $srcdir/config.layout not found"
+ echo "** Error: Cannot use undefined layout '$LAYOUT'"
+ exit 1
+ fi
+ # Catch layout names including a slash which will otherwise
+ # confuse the heck out of the sed script.
+ case $LAYOUT in
+ */*)
+ echo "** Error: $LAYOUT is not a valid layout name"
+ exit 1 ;;
+ esac
+ pldconf=./config.pld
+
+ sed -e "1s/[ ]*<[lL]ayout[ ]*$LAYOUT[ ]*>[ ]*//;1t" \
+ -e "1,/[ ]*<[lL]ayout[ ]*$LAYOUT[ ]*>[ ]*/d" \
+ -e '/[ ]*<\/Layout>[ ]*/,$d' \
+ -e "s/^[ ]*//g" \
+ -e "s/:[ ]*/=\'/g" \
+ -e "s/[ ]*$/'/g" \
+ $srcdir/config.layout > $pldconf
+ layout_name=$LAYOUT
+ if test ! -s $pldconf; then
+ echo "** Error: unable to find layout $layout_name"
+ exit 1
+ fi
+ . $pldconf
+ rm $pldconf
+ for var in prefix exec_prefix bindir sbindir libexecdir mandir \
+ sysconfdir datadir includedir localstatedir runtimedir \
+ logfiledir libdir installbuilddir libsuffix ; do
+ eval "val=\"\$$var\""
+ case $val in
+ *+)
+ val=`echo $val | sed -e 's;\+$;;'`
+ eval "$var=\"\$val\""
+ autosuffix=yes
+ ;;
+ *)
+ autosuffix=no
+ ;;
+ esac
+ val=`echo $val | sed -e 's:\(.\)/*$:\1:'`
+ val=`echo $val | sed -e 's:[\$]\([a-z_]*\):$\1:g'`
+ if test "$autosuffix" = "yes"; then
+ if echo $val | grep apache >/dev/null; then
+ addtarget=no
+ else
+ addtarget=yes
+ fi
+ if test "$addtarget" = "yes"; then
+ val="$val/apache2"
+ fi
+ fi
+ eval "$var='$val'"
+ done
+
+
+
+{ echo "$as_me:$LINENO: checking for chosen layout" >&5
+echo $ECHO_N "checking for chosen layout... $ECHO_C" >&6; }
+{ echo "$as_me:$LINENO: result: $layout_name" >&5
+echo "${ECHO_T}$layout_name" >&6; }
+
+
+
+ac_prev=
+# Retrieve the command-line arguments. The eval is needed because
+# the arguments are quoted to preserve accuracy.
+eval "set x $ac_configure_args"
+shift
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval "$ac_prev=\$ac_option"
+ ac_prev=
+ continue
+ fi
+
+ ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'`
+
+ case $ac_option in
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir="$ac_optarg" ;;
+
+ -datadir | --datadir | --datadi | --datad | --data | --dat | --da)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \
+ | --da=*)
+ datadir="$ac_optarg" ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix="$ac_optarg" ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir="$ac_optarg" ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir="$ac_optarg" ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir="$ac_optarg" ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir="$ac_optarg" ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst \
+ | --locals | --local | --loca | --loc | --lo)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* \
+ | --locals=* | --local=* | --loca=* | --loc=* | --lo=*)
+ localstatedir="$ac_optarg" ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir="$ac_optarg" ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix="$ac_optarg" ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir="$ac_optarg" ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir="$ac_optarg" ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir="$ac_optarg" ;;
+
+ esac
+done
+
+# Be sure to have absolute paths.
+for ac_var in exec_prefix prefix
+do
+ eval ac_val=$`echo $ac_var`
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* | NONE | '' ) ;;
+ *) { { echo "$as_me:$LINENO: error: expected an absolute path for --$ac_var: $ac_val" >&5
+echo "$as_me: error: expected an absolute path for --$ac_var: $ac_val" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+
+
+
+ apr_ste_save_CPPFLAGS="$CPPFLAGS"
+
+
+ apr_ste_save_CFLAGS="$CFLAGS"
+
+
+ apr_ste_save_CXXFLAGS="$CXXFLAGS"
+
+
+ apr_ste_save_LDFLAGS="$LDFLAGS"
+
+
+ apr_ste_save_LIBS="$LIBS"
+
+
+ apr_ste_save_INCLUDES="$INCLUDES"
+
+
+
+ rm -f config.nice
+ cat >config.nice<<EOF
+#! /bin/sh
+#
+# Created by configure
+
+EOF
+ if test -n "$CC"; then
+ echo "CC=\"$CC\"; export CC" >> config.nice
+ fi
+ if test -n "$CFLAGS"; then
+ echo "CFLAGS=\"$CFLAGS\"; export CFLAGS" >> config.nice
+ fi
+ if test -n "$CPPFLAGS"; then
+ echo "CPPFLAGS=\"$CPPFLAGS\"; export CPPFLAGS" >> config.nice
+ fi
+ if test -n "$LDFLAGS"; then
+ echo "LDFLAGS=\"$LDFLAGS\"; export LDFLAGS" >> config.nice
+ fi
+ if test -n "$LTFLAGS"; then
+ echo "LTFLAGS=\"$LTFLAGS\"; export LTFLAGS" >> config.nice
+ fi
+ if test -n "$LIBS"; then
+ echo "LIBS=\"$LIBS\"; export LIBS" >> config.nice
+ fi
+ if test -n "$INCLUDES"; then
+ echo "INCLUDES=\"$INCLUDES\"; export INCLUDES" >> config.nice
+ fi
+ if test -n "$NOTEST_CFLAGS"; then
+ echo "NOTEST_CFLAGS=\"$NOTEST_CFLAGS\"; export NOTEST_CFLAGS" >> config.nice
+ fi
+ if test -n "$NOTEST_CPPFLAGS"; then
+ echo "NOTEST_CPPFLAGS=\"$NOTEST_CPPFLAGS\"; export NOTEST_CPPFLAGS" >> config.nice
+ fi
+ if test -n "$NOTEST_LDFLAGS"; then
+ echo "NOTEST_LDFLAGS=\"$NOTEST_LDFLAGS\"; export NOTEST_LDFLAGS" >> config.nice
+ fi
+ if test -n "$NOTEST_LIBS"; then
+ echo "NOTEST_LIBS=\"$NOTEST_LIBS\"; export NOTEST_LIBS" >> config.nice
+ fi
+
+ # Retrieve command-line arguments.
+ eval "set x $0 $ac_configure_args"
+ shift
+
+ for arg
+ do
+
+ap_last=
+ap_cur="$arg"
+while test "x${ap_cur}" != "x${ap_last}";
+do
+ ap_last="${ap_cur}"
+ ap_cur=`eval "echo ${ap_cur}"`
+done
+arg="${ap_cur}"
+
+ echo "\"$arg\" \\" >> config.nice
+ done
+ echo '"$@"' >> config.nice
+ chmod +x config.nice
+
+
+nl='
+'
+
+ { echo "$as_me:$LINENO: checking for working mkdir -p" >&5
+echo $ECHO_N "checking for working mkdir -p... $ECHO_C" >&6; }
+if test "${ac_cv_mkdir_p+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+ test -d conftestdir && rm -rf conftestdir
+ mkdir -p conftestdir/somedir >/dev/null 2>&1
+ if test -d conftestdir/somedir; then
+ ac_cv_mkdir_p=yes
+ else
+ ac_cv_mkdir_p=no
+ fi
+ rm -rf conftestdir
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_mkdir_p" >&5
+echo "${ECHO_T}$ac_cv_mkdir_p" >&6; }
+ if test "$ac_cv_mkdir_p" = "yes"; then
+ mkdir_p="mkdir -p"
+ else
+ mkdir_p="$top_srcdir/build/mkdir.sh"
+ fi
+
+
+
+
+
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+ { { echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5
+echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;}
+ { (exit 1); exit 1; }; }
+
+{ echo "$as_me:$LINENO: checking build system type" >&5
+echo $ECHO_N "checking build system type... $ECHO_C" >&6; }
+if test "${ac_cv_build+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+ { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5
+echo "$as_me: error: cannot guess build type; you must specify one" >&2;}
+ { (exit 1); exit 1; }; }
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+ { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5
+echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_build" >&5
+echo "${ECHO_T}$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) { { echo "$as_me:$LINENO: error: invalid value of canonical build" >&5
+echo "$as_me: error: invalid value of canonical build" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ echo "$as_me:$LINENO: checking host system type" >&5
+echo $ECHO_N "checking host system type... $ECHO_C" >&6; }
+if test "${ac_cv_host+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "x$host_alias" = x; then
+ ac_cv_host=$ac_cv_build
+else
+ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+ { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5
+echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_host" >&5
+echo "${ECHO_T}$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) { { echo "$as_me:$LINENO: error: invalid value of canonical host" >&5
+echo "$as_me: error: invalid value of canonical host" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+{ echo "$as_me:$LINENO: checking target system type" >&5
+echo $ECHO_N "checking target system type... $ECHO_C" >&6; }
+if test "${ac_cv_target+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "x$target_alias" = x; then
+ ac_cv_target=$ac_cv_host
+else
+ ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` ||
+ { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $target_alias failed" >&5
+echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $target_alias failed" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_target" >&5
+echo "${ECHO_T}$ac_cv_target" >&6; }
+case $ac_cv_target in
+*-*-*) ;;
+*) { { echo "$as_me:$LINENO: error: invalid value of canonical target" >&5
+echo "$as_me: error: invalid value of canonical target" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+target=$ac_cv_target
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_target
+shift
+target_cpu=$1
+target_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+target_os=$*
+IFS=$ac_save_IFS
+case $target_os in *\ *) target_os=`echo "$target_os" | sed 's/ /-/g'`;; esac
+
+
+# The aliases save the names the user supplied, while $host etc.
+# will get canonicalized.
+test -n "$target_alias" &&
+ test "$program_prefix$program_suffix$program_transform_name" = \
+ NONENONEs,x,x, &&
+ program_prefix=${target_alias}-
+
+orig_prefix="$prefix"
+
+echo $ac_n "${nl}Configuring Apache Portable Runtime library...${nl}"
+
+
+ apr_found="no"
+
+ if test "$target_os" = "os2-emx"; then
+ # Scripts don't pass test -x on OS/2
+ TEST_X="test -f"
+ else
+ TEST_X="test -x"
+ fi
+
+ acceptable_majors="0 1 2"
+
+ apr_temp_acceptable_apr_config=""
+ for apr_temp_major in $acceptable_majors
+ do
+ case $apr_temp_major in
+ 0)
+ apr_temp_acceptable_apr_config="$apr_temp_acceptable_apr_config apr-config"
+ ;;
+ *)
+ apr_temp_acceptable_apr_config="$apr_temp_acceptable_apr_config apr-$apr_temp_major-config"
+ ;;
+ esac
+ done
+
+ { echo "$as_me:$LINENO: checking for APR" >&5
+echo $ECHO_N "checking for APR... $ECHO_C" >&6; }
+
+# Check whether --with-apr was given.
+if test "${with_apr+set}" = set; then
+ withval=$with_apr;
+ if test "$withval" = "no" || test "$withval" = "yes"; then
+ { { echo "$as_me:$LINENO: error: --with-apr requires a directory or file to be provided" >&5
+echo "$as_me: error: --with-apr requires a directory or file to be provided" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+ for apr_temp_apr_config_file in $apr_temp_acceptable_apr_config
+ do
+ for lookdir in "$withval/bin" "$withval"
+ do
+ if $TEST_X "$lookdir/$apr_temp_apr_config_file"; then
+ apr_config="$lookdir/$apr_temp_apr_config_file"
+
+ apr_found="yes"
+ break 2
+ fi
+ done
+ done
+
+ if test "$apr_found" != "yes" && $TEST_X "$withval" && $withval --help > /dev/null 2>&1 ; then
+ apr_config="$withval"
+ apr_found="yes"
+ fi
+
+ if test "$apr_found" != "yes"; then
+ { { echo "$as_me:$LINENO: error: the --with-apr parameter is incorrect. It must specify an install prefix, a build directory, or an apr-config file." >&5
+echo "$as_me: error: the --with-apr parameter is incorrect. It must specify an install prefix, a build directory, or an apr-config file." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+else
+
+ if test -n "1" && test "1" = "1"; then
+ for apr_temp_apr_config_file in $apr_temp_acceptable_apr_config
+ do
+ if $apr_temp_apr_config_file --help > /dev/null 2>&1 ; then
+ apr_config="$apr_temp_apr_config_file"
+
+ apr_found="yes"
+ break
+ else
+ for lookdir in /usr /usr/local /usr/local/apr /opt/apr; do
+ if $TEST_X "$lookdir/bin/$apr_temp_apr_config_file"; then
+ apr_config="$lookdir/bin/$apr_temp_apr_config_file"
+
+ apr_found="yes"
+ break 2
+ fi
+ done
+ fi
+ done
+ fi
+ if test "$apr_found" = "no" && test -d ""$srcdir/apr""; then
+ apr_temp_abs_srcdir="`cd \""$srcdir/apr"\" && pwd`"
+ apr_found="reconfig"
+ apr_bundled_major="`sed -n '/#define.*APR_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p' \""$srcdir/apr"/include/apr_version.h\"`"
+ case $apr_bundled_major in
+ "")
+ { { echo "$as_me:$LINENO: error: failed to find major version of bundled APR" >&5
+echo "$as_me: error: failed to find major version of bundled APR" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ 0)
+ apr_temp_apr_config_file="apr-config"
+ ;;
+ *)
+ apr_temp_apr_config_file="apr-$apr_bundled_major-config"
+ ;;
+ esac
+ if test -n ""./apr""; then
+ apr_config=""./apr"/$apr_temp_apr_config_file"
+ else
+ apr_config=""$srcdir/apr"/$apr_temp_apr_config_file"
+ fi
+ fi
+
+fi
+
+
+ { echo "$as_me:$LINENO: result: $apr_found" >&5
+echo "${ECHO_T}$apr_found" >&6; }
+
+
+if test "$apr_found" = "no"; then
+ { { echo "$as_me:$LINENO: error: APR not found. Please read the documentation." >&5
+echo "$as_me: error: APR not found. Please read the documentation." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test "$apr_found" = "reconfig"; then
+
+ # save our work to this point; this allows the sub-package to use it
+ cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
+echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ test "x$cache_file" != "x/dev/null" &&
+ { echo "$as_me:$LINENO: updating cache $cache_file" >&5
+echo "$as_me: updating cache $cache_file" >&6;}
+ cat confcache >$cache_file
+ else
+ { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
+echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+ echo "configuring package in apr now"
+ ac_popdir=`pwd`
+ apr_config_subdirs="apr"
+ test -d apr || $mkdir_p apr
+ ac_abs_srcdir=`(cd $srcdir/apr && pwd)`
+ cd apr
+
+ # A "../" for each directory in /$config_subdirs.
+ ac_dots=`echo $apr_config_subdirs|sed -e 's%^\./%%' -e 's%[^/]$%&/%' -e 's%[^/]*/%../%g'`
+
+ # Make the cache file pathname absolute for the subdirs
+ # required to correctly handle subdirs that might actually
+ # be symlinks
+ case "$cache_file" in
+ /*) # already absolute
+ ac_sub_cache_file=$cache_file ;;
+ *) # Was relative path.
+ ac_sub_cache_file="$ac_popdir/$cache_file" ;;
+ esac
+
+
+ apr_configure_args=
+ apr_sep=
+ for apr_configure_arg in $ac_configure_args
+ do
+ case "$apr_configure_arg" in
+ --enable-layout=*|\'--enable-layout=*)
+ continue ;;
+ esac
+ apr_configure_args="$apr_configure_args$apr_sep'$apr_configure_arg'"
+ apr_sep=" "
+ done
+
+
+ test "x$silent" = "xyes" && apr_configure_args="$apr_configure_args --silent"
+
+ apr_configure_args="--disable-option-checking $apr_configure_args"
+
+ if eval $SHELL $ac_abs_srcdir/configure $apr_configure_args --cache-file=$ac_sub_cache_file --srcdir=$ac_abs_srcdir --prefix=$prefix --exec-prefix=$exec_prefix --libdir=$libdir --includedir=$includedir --bindir=$bindir --datadir=$datadir --with-installbuilddir=$installbuilddir
+ then :
+ echo "apr configured properly"
+ else
+ echo "configure failed for apr"
+ exit 1
+ fi
+
+ cd $ac_popdir
+
+ # grab any updates from the sub-package
+ if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { echo "$as_me:$LINENO: loading cache $cache_file" >&5
+echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { echo "$as_me:$LINENO: creating cache $cache_file" >&5
+echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+
+ SERF_BUILD_SRCLIB_DIRS="apr $SERF_BUILD_SRCLIB_DIRS"
+ SERF_CLEAN_SRCLIB_DIRS="$SERF_CLEAN_SRCLIB_DIRS apr"
+fi
+
+
+ if test -z "$CC"; then
+ test "x$silent" != "xyes" && echo " setting CC to \"`$apr_config --cc`\""
+ CC="`$apr_config --cc`"
+ fi
+
+
+ if test -z "$CPP"; then
+ test "x$silent" != "xyes" && echo " setting CPP to \"`$apr_config --cpp`\""
+ CPP="`$apr_config --cpp`"
+ fi
+
+
+ if test -z "$APR_LIBTOOL"; then
+ test "x$silent" != "xyes" && echo " setting APR_LIBTOOL to \"`$apr_config --apr-libtool`\""
+ APR_LIBTOOL="`$apr_config --apr-libtool`"
+ fi
+
+
+ if test "x$CFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting CFLAGS to \"`$apr_config --cflags`\""
+ CFLAGS="`$apr_config --cflags`"
+ else
+ apr_addto_bugger="`$apr_config --cflags`"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $CFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to CFLAGS"
+ CFLAGS="$CFLAGS $i"
+ fi
+ done
+ fi
+
+
+ if test "x$CPPFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting CPPFLAGS to \"`$apr_config --cppflags`\""
+ CPPFLAGS="`$apr_config --cppflags`"
+ else
+ apr_addto_bugger="`$apr_config --cppflags`"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $CPPFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $i"
+ fi
+ done
+ fi
+
+
+ if test "x$LDFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting LDFLAGS to \"`$apr_config --ldflags`\""
+ LDFLAGS="`$apr_config --ldflags`"
+ else
+ apr_addto_bugger="`$apr_config --ldflags`"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $LDFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to LDFLAGS"
+ LDFLAGS="$LDFLAGS $i"
+ fi
+ done
+ fi
+
+SHLIBPATH_VAR=`$apr_config --shlib-path-var`
+APR_BINDIR=`$apr_config --bindir`
+APR_INCLUDES=`$apr_config --includes`
+APR_VERSION=`$apr_config --version`
+APR_CONFIG="$apr_config"
+
+
+ if test -z "$LTFLAGS"; then
+ test "x$silent" != "xyes" && echo " setting LTFLAGS to \""--silent"\""
+ LTFLAGS=""--silent""
+ fi
+
+
+
+
+
+
+
+
+
+APR_VERSION_MAJOR="`echo \"$APR_VERSION\" | sed 's,\..*,,'`"
+APR_VERSION_NUM="`echo \"$APR_VERSION\" | \
+ sed -e 's/[^0-9\.].*$//' \
+ -e 's/\.\([0-9]\)$/.0\1/' \
+ -e 's/\.\([0-9][0-9]\)$/.0\1/' \
+ -e 's/\.\([0-9]\)\./0\1/; s/\.//g;'`"
+
+if test "$APR_VERSION_NUM" -ge "200000"; then
+
+ APU_BINDIR=""
+ APU_INCLUDES=""
+ APU_VERSION=""
+ APU_CONFIG=""
+
+else
+
+
+
+
+
+ echo $ac_n "${nl}Configuring Apache Portable Runtime Utility library...${nl}"
+
+
+
+ apu_found="no"
+
+ if test "$target_os" = "os2-emx"; then
+ # Scripts don't pass test -x on OS/2
+ TEST_X="test -f"
+ else
+ TEST_X="test -x"
+ fi
+
+ acceptable_majors="$APR_VERSION_MAJOR"
+
+ apu_temp_acceptable_apu_config=""
+ for apu_temp_major in $acceptable_majors
+ do
+ case $apu_temp_major in
+ 0)
+ apu_temp_acceptable_apu_config="$apu_temp_acceptable_apu_config apu-config"
+ ;;
+ *)
+ apu_temp_acceptable_apu_config="$apu_temp_acceptable_apu_config apu-$apu_temp_major-config"
+ ;;
+ esac
+ done
+
+ { echo "$as_me:$LINENO: checking for APR-util" >&5
+echo $ECHO_N "checking for APR-util... $ECHO_C" >&6; }
+
+# Check whether --with-apr-util was given.
+if test "${with_apr_util+set}" = set; then
+ withval=$with_apr_util;
+ if test "$withval" = "no" || test "$withval" = "yes"; then
+ { { echo "$as_me:$LINENO: error: --with-apr-util requires a directory or file to be provided" >&5
+echo "$as_me: error: --with-apr-util requires a directory or file to be provided" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+ for apu_temp_apu_config_file in $apu_temp_acceptable_apu_config
+ do
+ for lookdir in "$withval/bin" "$withval"
+ do
+ if $TEST_X "$lookdir/$apu_temp_apu_config_file"; then
+ apu_config="$lookdir/$apu_temp_apu_config_file"
+
+ apu_found="yes"
+ break 2
+ fi
+ done
+ done
+
+ if test "$apu_found" != "yes" && $TEST_X "$withval" && $withval --help > /dev/null 2>&1 ; then
+ apu_config="$withval"
+ apu_found="yes"
+ fi
+
+ if test "$apu_found" != "yes"; then
+ { { echo "$as_me:$LINENO: error: the --with-apr-util parameter is incorrect. It must specify an install prefix, a build directory, or an apu-config file." >&5
+echo "$as_me: error: the --with-apr-util parameter is incorrect. It must specify an install prefix, a build directory, or an apu-config file." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+else
+
+ if test -n "1" && test "1" = "1"; then
+ for apu_temp_apu_config_file in $apu_temp_acceptable_apu_config
+ do
+ if $apu_temp_apu_config_file --help > /dev/null 2>&1 ; then
+ apu_config="$apu_temp_apu_config_file"
+
+ apu_found="yes"
+ break
+ else
+ for lookdir in /usr /usr/local /usr/local/apr /opt/apr; do
+ if $TEST_X "$lookdir/bin/$apu_temp_apu_config_file"; then
+ apu_config="$lookdir/bin/$apu_temp_apu_config_file"
+
+ apu_found="yes"
+ break 2
+ fi
+ done
+ fi
+ done
+ fi
+ if test "$apu_found" = "no" && test -d ""$srcdir/apr-util""; then
+ apu_temp_abs_srcdir="`cd \""$srcdir/apr-util"\" && pwd`"
+ apu_found="reconfig"
+ apu_bundled_major="`sed -n '/#define.*APU_MAJOR_VERSION/s/^[^0-9]*\([0-9]*\).*$/\1/p' \""$srcdir/apr-util"/include/apu_version.h\"`"
+ case $apu_bundled_major in
+ "")
+ { { echo "$as_me:$LINENO: error: failed to find major version of bundled APU" >&5
+echo "$as_me: error: failed to find major version of bundled APU" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ 0)
+ apu_temp_apu_config_file="apu-config"
+ ;;
+ *)
+ apu_temp_apu_config_file="apu-$apu_bundled_major-config"
+ ;;
+ esac
+ if test -n ""./apr-util""; then
+ apu_config=""./apr-util"/$apu_temp_apu_config_file"
+ else
+ apu_config=""$srcdir/apr-util"/$apu_temp_apu_config_file"
+ fi
+ fi
+
+fi
+
+
+ { echo "$as_me:$LINENO: result: $apu_found" >&5
+echo "${ECHO_T}$apu_found" >&6; }
+
+
+
+ if test "$apu_found" = "no"; then
+ { { echo "$as_me:$LINENO: error: APR-util not found. Please read the documentation." >&5
+echo "$as_me: error: APR-util not found. Please read the documentation." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+ # Catch some misconfigurations:
+ case ${apr_found}.${apu_found} in
+ reconfig.yes)
+ { { echo "$as_me:$LINENO: error: Cannot use an external APR-util with the bundled APR" >&5
+echo "$as_me: error: Cannot use an external APR-util with the bundled APR" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ yes.reconfig)
+ { { echo "$as_me:$LINENO: error: Cannot use an external APR with the bundled APR-util" >&5
+echo "$as_me: error: Cannot use an external APR with the bundled APR-util" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ esac
+
+ if test "$apu_found" = "reconfig"; then
+
+ # save our work to this point; this allows the sub-package to use it
+ cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
+echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ test "x$cache_file" != "x/dev/null" &&
+ { echo "$as_me:$LINENO: updating cache $cache_file" >&5
+echo "$as_me: updating cache $cache_file" >&6;}
+ cat confcache >$cache_file
+ else
+ { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
+echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+ echo "configuring package in apr-util now"
+ ac_popdir=`pwd`
+ apr_config_subdirs="apr-util"
+ test -d apr-util || $mkdir_p apr-util
+ ac_abs_srcdir=`(cd $srcdir/apr-util && pwd)`
+ cd apr-util
+
+ # A "../" for each directory in /$config_subdirs.
+ ac_dots=`echo $apr_config_subdirs|sed -e 's%^\./%%' -e 's%[^/]$%&/%' -e 's%[^/]*/%../%g'`
+
+ # Make the cache file pathname absolute for the subdirs
+ # required to correctly handle subdirs that might actually
+ # be symlinks
+ case "$cache_file" in
+ /*) # already absolute
+ ac_sub_cache_file=$cache_file ;;
+ *) # Was relative path.
+ ac_sub_cache_file="$ac_popdir/$cache_file" ;;
+ esac
+
+
+ apr_configure_args=
+ apr_sep=
+ for apr_configure_arg in $ac_configure_args
+ do
+ case "$apr_configure_arg" in
+ --enable-layout=*|\'--enable-layout=*)
+ continue ;;
+ esac
+ apr_configure_args="$apr_configure_args$apr_sep'$apr_configure_arg'"
+ apr_sep=" "
+ done
+
+
+ test "x$silent" = "xyes" && apr_configure_args="$apr_configure_args --silent"
+
+ apr_configure_args="--disable-option-checking $apr_configure_args"
+
+ if eval $SHELL $ac_abs_srcdir/configure $apr_configure_args --cache-file=$ac_sub_cache_file --srcdir=$ac_abs_srcdir --with-apr=../apr --prefix=$prefix --exec-prefix=$exec_prefix --libdir=$libdir --includedir=$includedir --bindir=$bindir
+ then :
+ echo "apr-util configured properly"
+ else
+ echo "configure failed for apr-util"
+ exit 1
+ fi
+
+ cd $ac_popdir
+
+ # grab any updates from the sub-package
+ if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { echo "$as_me:$LINENO: loading cache $cache_file" >&5
+echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { echo "$as_me:$LINENO: creating cache $cache_file" >&5
+echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+
+ SERF_BUILD_SRCLIB_DIRS="$SERF_BUILD_SRCLIB_DIRS apr-util"
+ SERF_CLEAN_SRCLIB_DIRS="apr-util $SERF_CLEAN_SRCLIB_DIRS"
+ fi
+
+
+ if test "x$LDFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting LDFLAGS to \"`$apu_config --ldflags`\""
+ LDFLAGS="`$apu_config --ldflags`"
+ else
+ apr_addto_bugger="`$apu_config --ldflags`"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $LDFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to LDFLAGS"
+ LDFLAGS="$LDFLAGS $i"
+ fi
+ done
+ fi
+
+ APU_BINDIR=`$apu_config --bindir`
+ APU_INCLUDES=`$apu_config --includes`
+ APU_VERSION=`$apu_config --version`
+ APU_CONFIG="$APU_BINDIR/apu-`echo ${APU_VERSION} | sed 's,\..*,,'`-config"
+fi
+
+
+
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&5
+echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&5
+echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&5
+echo "$as_me: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+
+# Provide some information about the compiler.
+echo "$as_me:$LINENO: checking for C compiler version" >&5
+ac_compiler=`set X $ac_compile; echo $2`
+{ (ac_try="$ac_compiler --version >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler --version >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -v >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler -v >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -V >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler -V >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers, and finding out an intuition
+# of exeext.
+{ echo "$as_me:$LINENO: checking for C compiler default output file name" >&5
+echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6; }
+ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+#
+# List of possible output files, starting from the most likely.
+# The algorithm is not robust to junk in `.', hence go to wildcards (a.*)
+# only as a last resort. b.out is created by i960 compilers.
+ac_files='a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out'
+#
+# The IRIX 6 linker writes into existing files which may not be
+# executable, retaining their permissions. Remove them first so a
+# subsequent execution test works.
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { (ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+
+{ echo "$as_me:$LINENO: result: $ac_file" >&5
+echo "${ECHO_T}$ac_file" >&6; }
+if test -z "$ac_file"; then
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: C compiler cannot create executables
+See \`config.log' for more details." >&5
+echo "$as_me: error: C compiler cannot create executables
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }
+fi
+
+ac_exeext=$ac_cv_exeext
+
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ echo "$as_me:$LINENO: checking whether the C compiler works" >&5
+echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6; }
+# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
+# If not cross compiling, check that we can run a simple program.
+if test "$cross_compiling" != yes; then
+ if { ac_try='./$ac_file'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { echo "$as_me:$LINENO: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ fi
+fi
+{ echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6; }
+
+rm -f a.out a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
+echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6; }
+{ echo "$as_me:$LINENO: result: $cross_compiling" >&5
+echo "${ECHO_T}$cross_compiling" >&6; }
+
+{ echo "$as_me:$LINENO: checking for suffix of executables" >&5
+echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6; }
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest$ac_cv_exeext
+{ echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
+echo "${ECHO_T}$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+{ echo "$as_me:$LINENO: checking for suffix of object files" >&5
+echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6; }
+if test "${ac_cv_objext+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
+echo "${ECHO_T}$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
+echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6; }
+if test "${ac_cv_c_compiler_gnu+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_compiler_gnu=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_compiler_gnu=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
+echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6; }
+GCC=`test $ac_compiler_gnu = yes && echo yes`
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
+echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6; }
+if test "${ac_cv_prog_cc_g+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ CFLAGS=""
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5
+echo $ECHO_N "checking for $CC option to accept ISO C89... $ECHO_C" >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_c89=$ac_arg
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { echo "$as_me:$LINENO: result: none needed" >&5
+echo "${ECHO_T}none needed" >&6; } ;;
+ xno)
+ { echo "$as_me:$LINENO: result: unsupported" >&5
+echo "${ECHO_T}unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
+echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if test "${ac_cv_prog_CPP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+{ echo "$as_me:$LINENO: result: $CPP" >&5
+echo "${ECHO_T}$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ :
+else
+ { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&5
+echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+{ echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
+echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6; }
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in
+ ./ | .// | /cC/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+done
+IFS=$as_save_IFS
+
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ echo "$as_me:$LINENO: result: $INSTALL" >&5
+echo "${ECHO_T}$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+
+if test "x${cache_file}" = "x/dev/null"; then
+ # Likewise, ensure that CC and CPP are passed through to the pcre
+ # configure script iff caching is disabled (the autoconf 2.5x default).
+ export CC; export CPP
+fi
+
+echo $ac_n "Configuring Serf...${nl}"
+
+abs_srcdir=`(cd $srcdir && pwd)`
+abs_builddir=`pwd`
+
+get_version="$abs_srcdir/build/get-version.sh"
+version_hdr="$abs_srcdir/serf.h"
+SERF_MAJOR_VERSION="`$get_version major $version_hdr SERF`"
+SERF_DOTTED_VERSION="`$get_version all $version_hdr SERF`"
+
+
+
+
+
+
+
+
+# Check whether --with-openssl was given.
+if test "${with_openssl+set}" = set; then
+ withval=$with_openssl;
+ if test "$withval" = "yes"; then
+ { { echo "$as_me:$LINENO: error: --with-openssl requires a path" >&5
+echo "$as_me: error: --with-openssl requires a path" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ openssl_prefix=$withval
+
+ if test "x$openssl_prefix" != "x" -a ! -d "$openssl_prefix"; then
+ { { echo "$as_me:$LINENO: error: '--with-openssl requires a path to a directory'" >&5
+echo "$as_me: error: '--with-openssl requires a path to a directory'" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+
+ if test "x$CPPFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting CPPFLAGS to \""-I${openssl_prefix}/include"\""
+ CPPFLAGS=""-I${openssl_prefix}/include""
+ else
+ apr_addto_bugger=""-I${openssl_prefix}/include""
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $CPPFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $i"
+ fi
+ done
+ fi
+
+ if test -e "${openssl_prefix}/Makefile"; then
+
+ if test "x$LDFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting LDFLAGS to \""-L${openssl_prefix}"\""
+ LDFLAGS=""-L${openssl_prefix}""
+ else
+ apr_addto_bugger=""-L${openssl_prefix}""
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $LDFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to LDFLAGS"
+ LDFLAGS="$LDFLAGS $i"
+ fi
+ done
+ fi
+
+
+ if test "x$LDFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting LDFLAGS to \""-R${openssl_prefix}"\""
+ LDFLAGS=""-R${openssl_prefix}""
+ else
+ apr_addto_bugger=""-R${openssl_prefix}""
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $LDFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to LDFLAGS"
+ LDFLAGS="$LDFLAGS $i"
+ fi
+ done
+ fi
+
+ else
+
+ if test "x$LDFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting LDFLAGS to \""-L${openssl_prefix}/lib"\""
+ LDFLAGS=""-L${openssl_prefix}/lib""
+ else
+ apr_addto_bugger=""-L${openssl_prefix}/lib""
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $LDFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to LDFLAGS"
+ LDFLAGS="$LDFLAGS $i"
+ fi
+ done
+ fi
+
+
+ if test "x$LDFLAGS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting LDFLAGS to \""-R${openssl_prefix}/lib"\""
+ LDFLAGS=""-R${openssl_prefix}/lib""
+ else
+ apr_addto_bugger=""-R${openssl_prefix}/lib""
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $LDFLAGS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to LDFLAGS"
+ LDFLAGS="$LDFLAGS $i"
+ fi
+ done
+ fi
+
+ fi
+ fi
+
+fi
+
+
+
+
+{ echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5
+echo $ECHO_N "checking for grep that handles long lines and -e... $ECHO_C" >&6; }
+if test "${ac_cv_path_GREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # Extract the first word of "grep ggrep" to use in msg output
+if test -z "$GREP"; then
+set dummy grep ggrep; ac_prog_name=$2
+if test "${ac_cv_path_GREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_path_GREP_found=false
+# Loop through the user's path and test for each of PROGNAME-LIST
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+ # Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ echo $ECHO_N "0123456789$ECHO_C" >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+
+ $ac_path_GREP_found && break 3
+ done
+done
+
+done
+IFS=$as_save_IFS
+
+
+fi
+
+GREP="$ac_cv_path_GREP"
+if test -z "$GREP"; then
+ { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5
+echo "${ECHO_T}$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ echo "$as_me:$LINENO: checking for egrep" >&5
+echo $ECHO_N "checking for egrep... $ECHO_C" >&6; }
+if test "${ac_cv_path_EGREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ # Extract the first word of "egrep" to use in msg output
+if test -z "$EGREP"; then
+set dummy egrep; ac_prog_name=$2
+if test "${ac_cv_path_EGREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_path_EGREP_found=false
+# Loop through the user's path and test for each of PROGNAME-LIST
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+ # Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ echo $ECHO_N "0123456789$ECHO_C" >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+
+ $ac_path_EGREP_found && break 3
+ done
+done
+
+done
+IFS=$as_save_IFS
+
+
+fi
+
+EGREP="$ac_cv_path_EGREP"
+if test -z "$EGREP"; then
+ { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+
+ fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5
+echo "${ECHO_T}$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ echo "$as_me:$LINENO: checking for ANSI C header files" >&5
+echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6; }
+if test "${ac_cv_header_stdc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_header_stdc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_header_stdc=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f -r conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f -r conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+
+
+fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5
+echo "${ECHO_T}$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define STDC_HEADERS 1
+_ACEOF
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+
+
+
+
+
+
+
+
+
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+{ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ eval "$as_ac_Header=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_Header=no"
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+ { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+if test "${ac_cv_header_openssl_opensslv_h+set}" = set; then
+ { echo "$as_me:$LINENO: checking for openssl/opensslv.h" >&5
+echo $ECHO_N "checking for openssl/opensslv.h... $ECHO_C" >&6; }
+if test "${ac_cv_header_openssl_opensslv_h+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_header_openssl_opensslv_h" >&5
+echo "${ECHO_T}$ac_cv_header_openssl_opensslv_h" >&6; }
+else
+ # Is the header compilable?
+{ echo "$as_me:$LINENO: checking openssl/opensslv.h usability" >&5
+echo $ECHO_N "checking openssl/opensslv.h usability... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <openssl/opensslv.h>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ echo "$as_me:$LINENO: checking openssl/opensslv.h presence" >&5
+echo $ECHO_N "checking openssl/opensslv.h presence... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <openssl/opensslv.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+ yes:no: )
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: proceeding with the compiler's result" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: present but cannot be compiled" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: present but cannot be compiled" >&2;}
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: check for missing prerequisite headers?" >&2;}
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: see the Autoconf documentation" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: see the Autoconf documentation" >&2;}
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: section \"Present But Cannot Be Compiled\"" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: section \"Present But Cannot Be Compiled\"" >&2;}
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: proceeding with the preprocessor's result" >&2;}
+ { echo "$as_me:$LINENO: WARNING: openssl/opensslv.h: in the future, the compiler will take precedence" >&5
+echo "$as_me: WARNING: openssl/opensslv.h: in the future, the compiler will take precedence" >&2;}
+
+ ;;
+esac
+{ echo "$as_me:$LINENO: checking for openssl/opensslv.h" >&5
+echo $ECHO_N "checking for openssl/opensslv.h... $ECHO_C" >&6; }
+if test "${ac_cv_header_openssl_opensslv_h+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_header_openssl_opensslv_h=$ac_header_preproc
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_header_openssl_opensslv_h" >&5
+echo "${ECHO_T}$ac_cv_header_openssl_opensslv_h" >&6; }
+
+fi
+if test $ac_cv_header_openssl_opensslv_h = yes; then
+ :
+else
+ { { echo "$as_me:$LINENO: error: We require OpenSSL; try --with-openssl" >&5
+echo "$as_me: error: We require OpenSSL; try --with-openssl" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+
+
+{ echo "$as_me:$LINENO: checking for library containing fabs" >&5
+echo $ECHO_N "checking for library containing fabs... $ECHO_C" >&6; }
+if test "${ac_cv_search_fabs+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char fabs ();
+int
+main ()
+{
+return fabs ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' m; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext &&
+ $as_test_x conftest$ac_exeext; then
+ ac_cv_search_fabs=$ac_res
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext
+ if test "${ac_cv_search_fabs+set}" = set; then
+ break
+fi
+done
+if test "${ac_cv_search_fabs+set}" = set; then
+ :
+else
+ ac_cv_search_fabs=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_search_fabs" >&5
+echo "${ECHO_T}$ac_cv_search_fabs" >&6; }
+ac_res=$ac_cv_search_fabs
+if test "$ac_res" != no; then
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+fi
+
+
+libs=""
+if test -n "$apu_config"; then
+
+ if test "x$libs" = "x"; then
+ test "x$silent" != "xyes" && echo " setting libs to \"`$apu_config --link-libtool --libs`\""
+ libs="`$apu_config --link-libtool --libs`"
+ else
+ apr_addto_bugger="`$apu_config --link-libtool --libs`"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $libs; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to libs"
+ libs="$libs $i"
+ fi
+ done
+ fi
+
+fi
+
+ if test "x$libs" = "x"; then
+ test "x$silent" != "xyes" && echo " setting libs to \"`$apr_config --link-libtool --libs` $LIBS\""
+ libs="`$apr_config --link-libtool --libs` $LIBS"
+ else
+ apr_addto_bugger="`$apr_config --link-libtool --libs` $LIBS"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $libs; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to libs"
+ libs="$libs $i"
+ fi
+ done
+ fi
+
+
+
+ if test "x$SERF_LIBS" = "x"; then
+ test "x$silent" != "xyes" && echo " setting SERF_LIBS to \"$libs\""
+ SERF_LIBS="$libs"
+ else
+ apr_addto_bugger="$libs"
+ for i in $apr_addto_bugger; do
+ apr_addto_duplicate="0"
+ for j in $SERF_LIBS; do
+ if test "x$i" = "x$j"; then
+ apr_addto_duplicate="1"
+ break
+ fi
+ done
+ if test $apr_addto_duplicate = "0"; then
+ test "x$silent" != "xyes" && echo " adding \"$i\" to SERF_LIBS"
+ SERF_LIBS="$SERF_LIBS $i"
+ fi
+ done
+ fi
+
+
+
+
+set X $apr_ste_save_CPPFLAGS
+if test ${#} -eq 1; then
+ EXTRA_CPPFLAGS="$CPPFLAGS"
+ CPPFLAGS=
+else
+ if test "x$apr_ste_save_CPPFLAGS" = "x$CPPFLAGS"; then
+ EXTRA_CPPFLAGS=
+ else
+ EXTRA_CPPFLAGS=`echo "$CPPFLAGS" | sed -e "s%${apr_ste_save_CPPFLAGS}%%"`
+ CPPFLAGS="$apr_ste_save_CPPFLAGS"
+ fi
+fi
+if test "x$silent" != "xyes"; then
+ echo " restoring CPPFLAGS to \"$CPPFLAGS\""
+ echo " setting EXTRA_CPPFLAGS to \"$EXTRA_CPPFLAGS\""
+fi
+
+
+
+set X $apr_ste_save_CFLAGS
+if test ${#} -eq 1; then
+ EXTRA_CFLAGS="$CFLAGS"
+ CFLAGS=
+else
+ if test "x$apr_ste_save_CFLAGS" = "x$CFLAGS"; then
+ EXTRA_CFLAGS=
+ else
+ EXTRA_CFLAGS=`echo "$CFLAGS" | sed -e "s%${apr_ste_save_CFLAGS}%%"`
+ CFLAGS="$apr_ste_save_CFLAGS"
+ fi
+fi
+if test "x$silent" != "xyes"; then
+ echo " restoring CFLAGS to \"$CFLAGS\""
+ echo " setting EXTRA_CFLAGS to \"$EXTRA_CFLAGS\""
+fi
+
+
+
+set X $apr_ste_save_CXXFLAGS
+if test ${#} -eq 1; then
+ EXTRA_CXXFLAGS="$CXXFLAGS"
+ CXXFLAGS=
+else
+ if test "x$apr_ste_save_CXXFLAGS" = "x$CXXFLAGS"; then
+ EXTRA_CXXFLAGS=
+ else
+ EXTRA_CXXFLAGS=`echo "$CXXFLAGS" | sed -e "s%${apr_ste_save_CXXFLAGS}%%"`
+ CXXFLAGS="$apr_ste_save_CXXFLAGS"
+ fi
+fi
+if test "x$silent" != "xyes"; then
+ echo " restoring CXXFLAGS to \"$CXXFLAGS\""
+ echo " setting EXTRA_CXXFLAGS to \"$EXTRA_CXXFLAGS\""
+fi
+
+
+
+set X $apr_ste_save_LDFLAGS
+if test ${#} -eq 1; then
+ EXTRA_LDFLAGS="$LDFLAGS"
+ LDFLAGS=
+else
+ if test "x$apr_ste_save_LDFLAGS" = "x$LDFLAGS"; then
+ EXTRA_LDFLAGS=
+ else
+ EXTRA_LDFLAGS=`echo "$LDFLAGS" | sed -e "s%${apr_ste_save_LDFLAGS}%%"`
+ LDFLAGS="$apr_ste_save_LDFLAGS"
+ fi
+fi
+if test "x$silent" != "xyes"; then
+ echo " restoring LDFLAGS to \"$LDFLAGS\""
+ echo " setting EXTRA_LDFLAGS to \"$EXTRA_LDFLAGS\""
+fi
+
+
+
+set X $apr_ste_save_LIBS
+if test ${#} -eq 1; then
+ EXTRA_LIBS="$LIBS"
+ LIBS=
+else
+ if test "x$apr_ste_save_LIBS" = "x$LIBS"; then
+ EXTRA_LIBS=
+ else
+ EXTRA_LIBS=`echo "$LIBS" | sed -e "s%${apr_ste_save_LIBS}%%"`
+ LIBS="$apr_ste_save_LIBS"
+ fi
+fi
+if test "x$silent" != "xyes"; then
+ echo " restoring LIBS to \"$LIBS\""
+ echo " setting EXTRA_LIBS to \"$EXTRA_LIBS\""
+fi
+
+
+
+set X $apr_ste_save_INCLUDES
+if test ${#} -eq 1; then
+ EXTRA_INCLUDES="$INCLUDES"
+ INCLUDES=
+else
+ if test "x$apr_ste_save_INCLUDES" = "x$INCLUDES"; then
+ EXTRA_INCLUDES=
+ else
+ EXTRA_INCLUDES=`echo "$INCLUDES" | sed -e "s%${apr_ste_save_INCLUDES}%%"`
+ INCLUDES="$apr_ste_save_INCLUDES"
+ fi
+fi
+if test "x$silent" != "xyes"; then
+ echo " restoring INCLUDES to \"$INCLUDES\""
+ echo " setting EXTRA_INCLUDES to \"$EXTRA_INCLUDES\""
+fi
+
+
+
+ac_config_files="$ac_config_files Makefile serf.pc"
+
+ac_config_commands="$ac_config_commands mkdir-vpath"
+
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
+echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ test "x$cache_file" != "x/dev/null" &&
+ { echo "$as_me:$LINENO: updating cache $cache_file" >&5
+echo "$as_me: updating cache $cache_file" >&6;}
+ cat confcache >$cache_file
+ else
+ { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
+echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+# Transform confdefs.h into DEFS.
+# Protect against shell expansion while executing Makefile rules.
+# Protect against Makefile macro expansion.
+#
+# If the first sed substitution is executed (which looks for macros that
+# take arguments), then branch to the quote section. Otherwise,
+# look for a macro that doesn't take arguments.
+ac_script='
+t clear
+:clear
+s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g
+t quote
+s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g
+t quote
+b any
+:quote
+s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g
+s/\[/\\&/g
+s/\]/\\&/g
+s/\$/$$/g
+H
+:any
+${
+ g
+ s/^\n//
+ s/\n/ /g
+ p
+}
+'
+DEFS=`sed -n "$ac_script" confdefs.h`
+
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
+: ${CONFIG_STATUS=./config.status}
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
+echo "$as_me: creating $CONFIG_STATUS" >&6;}
+cat >$CONFIG_STATUS <<_ACEOF
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+as_nl='
+'
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir
+fi
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+
+# Save the log message, to keep $[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by $as_me, which was
+generated by GNU Autoconf 2.61. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTIONS] [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+
+Configuration files:
+$config_files
+
+Configuration commands:
+$config_commands
+
+Report bugs to <bug-autoconf@gnu.org>."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+ac_cs_version="\\
+config.status
+configured by $0, generated by GNU Autoconf 2.61,
+ with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright (C) 2006 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If no files are specified by the user, then we need to provide default
+# values. But we need to know if files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ echo "$ac_cs_version"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --he | --h | --help | --hel | -h )
+ echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+if \$ac_cs_recheck; then
+ echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
+ CONFIG_SHELL=$SHELL
+ export CONFIG_SHELL
+ exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ "serf.pc") CONFIG_FILES="$CONFIG_FILES serf.pc" ;;
+ "mkdir-vpath") CONFIG_COMMANDS="$CONFIG_COMMANDS mkdir-vpath" ;;
+
+ *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp=
+ trap 'exit_status=$?
+ { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} ||
+{
+ echo "$me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+#
+# Set up the sed scripts for CONFIG_FILES section.
+#
+
+# No need to generate the scripts if there are no CONFIG_FILES.
+# This happens for instance when ./config.status config.h
+if test -n "$CONFIG_FILES"; then
+
+_ACEOF
+
+
+
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ cat >conf$$subs.sed <<_ACEOF
+SHELL!$SHELL$ac_delim
+PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim
+PACKAGE_NAME!$PACKAGE_NAME$ac_delim
+PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim
+PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim
+PACKAGE_STRING!$PACKAGE_STRING$ac_delim
+PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim
+exec_prefix!$exec_prefix$ac_delim
+prefix!$prefix$ac_delim
+program_transform_name!$program_transform_name$ac_delim
+bindir!$bindir$ac_delim
+sbindir!$sbindir$ac_delim
+libexecdir!$libexecdir$ac_delim
+datarootdir!$datarootdir$ac_delim
+datadir!$datadir$ac_delim
+sysconfdir!$sysconfdir$ac_delim
+sharedstatedir!$sharedstatedir$ac_delim
+localstatedir!$localstatedir$ac_delim
+includedir!$includedir$ac_delim
+oldincludedir!$oldincludedir$ac_delim
+docdir!$docdir$ac_delim
+infodir!$infodir$ac_delim
+htmldir!$htmldir$ac_delim
+dvidir!$dvidir$ac_delim
+pdfdir!$pdfdir$ac_delim
+psdir!$psdir$ac_delim
+libdir!$libdir$ac_delim
+localedir!$localedir$ac_delim
+mandir!$mandir$ac_delim
+DEFS!$DEFS$ac_delim
+ECHO_C!$ECHO_C$ac_delim
+ECHO_N!$ECHO_N$ac_delim
+ECHO_T!$ECHO_T$ac_delim
+LIBS!$LIBS$ac_delim
+build_alias!$build_alias$ac_delim
+host_alias!$host_alias$ac_delim
+target_alias!$target_alias$ac_delim
+mkdir_p!$mkdir_p$ac_delim
+build!$build$ac_delim
+build_cpu!$build_cpu$ac_delim
+build_vendor!$build_vendor$ac_delim
+build_os!$build_os$ac_delim
+host!$host$ac_delim
+host_cpu!$host_cpu$ac_delim
+host_vendor!$host_vendor$ac_delim
+host_os!$host_os$ac_delim
+target!$target$ac_delim
+target_cpu!$target_cpu$ac_delim
+target_vendor!$target_vendor$ac_delim
+target_os!$target_os$ac_delim
+LTFLAGS!$LTFLAGS$ac_delim
+APR_LIBTOOL!$APR_LIBTOOL$ac_delim
+APR_BINDIR!$APR_BINDIR$ac_delim
+APR_INCLUDES!$APR_INCLUDES$ac_delim
+APR_VERSION!$APR_VERSION$ac_delim
+APR_CONFIG!$APR_CONFIG$ac_delim
+APU_BINDIR!$APU_BINDIR$ac_delim
+APU_INCLUDES!$APU_INCLUDES$ac_delim
+APU_VERSION!$APU_VERSION$ac_delim
+APU_CONFIG!$APU_CONFIG$ac_delim
+CC!$CC$ac_delim
+CFLAGS!$CFLAGS$ac_delim
+LDFLAGS!$LDFLAGS$ac_delim
+CPPFLAGS!$CPPFLAGS$ac_delim
+ac_ct_CC!$ac_ct_CC$ac_delim
+EXEEXT!$EXEEXT$ac_delim
+OBJEXT!$OBJEXT$ac_delim
+CPP!$CPP$ac_delim
+INSTALL_PROGRAM!$INSTALL_PROGRAM$ac_delim
+INSTALL_SCRIPT!$INSTALL_SCRIPT$ac_delim
+INSTALL_DATA!$INSTALL_DATA$ac_delim
+SERF_MAJOR_VERSION!$SERF_MAJOR_VERSION$ac_delim
+SERF_DOTTED_VERSION!$SERF_DOTTED_VERSION$ac_delim
+SERF_BUILD_SRCLIB_DIRS!$SERF_BUILD_SRCLIB_DIRS$ac_delim
+SERF_CLEAN_SRCLIB_DIRS!$SERF_CLEAN_SRCLIB_DIRS$ac_delim
+GREP!$GREP$ac_delim
+EGREP!$EGREP$ac_delim
+SERF_LIBS!$SERF_LIBS$ac_delim
+EXTRA_CPPFLAGS!$EXTRA_CPPFLAGS$ac_delim
+EXTRA_CFLAGS!$EXTRA_CFLAGS$ac_delim
+EXTRA_CXXFLAGS!$EXTRA_CXXFLAGS$ac_delim
+EXTRA_LDFLAGS!$EXTRA_LDFLAGS$ac_delim
+EXTRA_LIBS!$EXTRA_LIBS$ac_delim
+EXTRA_INCLUDES!$EXTRA_INCLUDES$ac_delim
+LIBOBJS!$LIBOBJS$ac_delim
+LTLIBOBJS!$LTLIBOBJS$ac_delim
+_ACEOF
+
+ if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 86; then
+ break
+ elif $ac_last_try; then
+ { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed`
+if test -n "$ac_eof"; then
+ ac_eof=`echo "$ac_eof" | sort -nru | sed 1q`
+ ac_eof=`expr $ac_eof + 1`
+fi
+
+cat >>$CONFIG_STATUS <<_ACEOF
+cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end
+_ACEOF
+sed '
+s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g
+s/^/s,@/; s/!/@,|#_!!_#|/
+:n
+t n
+s/'"$ac_delim"'$/,g/; t
+s/$/\\/; p
+N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n
+' >>$CONFIG_STATUS <conf$$subs.sed
+rm -f conf$$subs.sed
+cat >>$CONFIG_STATUS <<_ACEOF
+:end
+s/|#_!!_#|//g
+CEOF$ac_eof
+_ACEOF
+
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/
+s/:*\${srcdir}:*/:/
+s/:*@srcdir@:*/:/
+s/^\([^=]*=[ ]*\):*/\1/
+s/:*$//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+fi # test -n "$CONFIG_FILES"
+
+
+for ac_tag in :F $CONFIG_FILES :C $CONFIG_COMMANDS
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5
+echo "$as_me: error: Invalid tag $ac_tag." >&2;}
+ { (exit 1); exit 1; }; };;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
+echo "$as_me: error: cannot find input file: $ac_f" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+ ac_file_inputs="$ac_file_inputs $ac_f"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input="Generated from "`IFS=:
+ echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure."
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ fi
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$tmp/stdin";;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir="$ac_dir"
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+
+case `sed -n '/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p
+' $ac_file_inputs` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF
+ sed "$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s&@configure_input@&$configure_input&;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+$ac_datarootdir_hack
+" $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&5
+echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&2;}
+
+ rm -f "$tmp/stdin"
+ case $ac_file in
+ -) cat "$tmp/out"; rm -f "$tmp/out";;
+ *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;;
+ esac
+ ;;
+
+
+ :C) { echo "$as_me:$LINENO: executing $ac_file commands" >&5
+echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "mkdir-vpath":C) make mkdir-vpath ;;
+
+ esac
+done # for ac_tag
+
+
+{ (exit 0); exit 0; }
+_ACEOF
+chmod +x $CONFIG_STATUS
+ac_clean_files=$ac_clean_files_save
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || { (exit 1); exit 1; }
+fi
+
diff --git a/configure.in b/configure.in
new file mode 100644
index 0000000..ffd5acd
--- /dev/null
+++ b/configure.in
@@ -0,0 +1,219 @@
+dnl Autoconf file for Serf
+
+AC_PREREQ(2.50)
+AC_INIT(context.c)
+
+AC_CONFIG_AUX_DIR(build)
+
+sinclude(build/apr_common.m4)
+sinclude(build/find_apr.m4)
+
+AC_PREFIX_DEFAULT(/usr/local/serf)
+
+dnl Get the layout here, so we can pass the required variables to apr
+APR_ENABLE_LAYOUT(Serf, [])
+
+dnl reparse the configure arguments.
+APR_PARSE_ARGUMENTS
+
+APR_SAVE_THE_ENVIRONMENT(CPPFLAGS)
+APR_SAVE_THE_ENVIRONMENT(CFLAGS)
+APR_SAVE_THE_ENVIRONMENT(CXXFLAGS)
+APR_SAVE_THE_ENVIRONMENT(LDFLAGS)
+APR_SAVE_THE_ENVIRONMENT(LIBS)
+APR_SAVE_THE_ENVIRONMENT(INCLUDES)
+
+APR_CONFIG_NICE(config.nice)
+
+nl='
+'
+dnl Check that mkdir -p works
+APR_MKDIR_P_CHECK($top_srcdir/build/mkdir.sh)
+AC_SUBST(mkdir_p)
+
+dnl ## Run configure for packages Apache uses
+
+dnl shared library support for these packages doesn't currently
+dnl work on some platforms
+
+AC_CANONICAL_SYSTEM
+
+orig_prefix="$prefix"
+
+echo $ac_n "${nl}Configuring Apache Portable Runtime library...${nl}"
+
+APR_FIND_APR("$srcdir/apr", "./apr", 1, 0 1 2)
+
+if test "$apr_found" = "no"; then
+ AC_MSG_ERROR([APR not found. Please read the documentation.])
+fi
+
+if test "$apr_found" = "reconfig"; then
+ APR_SUBDIR_CONFIG(apr,
+ [--prefix=$prefix --exec-prefix=$exec_prefix --libdir=$libdir --includedir=$includedir --bindir=$bindir --datadir=$datadir --with-installbuilddir=$installbuilddir],
+ [--enable-layout=*|\'--enable-layout=*])
+ dnl We must be the first to build and the last to be cleaned
+ SERF_BUILD_SRCLIB_DIRS="apr $SERF_BUILD_SRCLIB_DIRS"
+ SERF_CLEAN_SRCLIB_DIRS="$SERF_CLEAN_SRCLIB_DIRS apr"
+fi
+
+APR_SETIFNULL(CC, `$apr_config --cc`)
+APR_SETIFNULL(CPP, `$apr_config --cpp`)
+APR_SETIFNULL(APR_LIBTOOL, `$apr_config --apr-libtool`)
+APR_ADDTO(CFLAGS, `$apr_config --cflags`)
+APR_ADDTO(CPPFLAGS, `$apr_config --cppflags`)
+APR_ADDTO(LDFLAGS, `$apr_config --ldflags`)
+SHLIBPATH_VAR=`$apr_config --shlib-path-var`
+APR_BINDIR=`$apr_config --bindir`
+APR_INCLUDES=`$apr_config --includes`
+APR_VERSION=`$apr_config --version`
+APR_CONFIG="$apr_config"
+
+APR_SETIFNULL(LTFLAGS, "--silent")
+AC_SUBST(LTFLAGS)
+
+AC_SUBST(APR_LIBTOOL)
+AC_SUBST(APR_BINDIR)
+AC_SUBST(APR_INCLUDES)
+AC_SUBST(APR_VERSION)
+AC_SUBST(APR_CONFIG)
+
+APR_VERSION_MAJOR="`echo \"$APR_VERSION\" | sed 's,\..*,,'`"
+APR_VERSION_NUM="`echo \"$APR_VERSION\" | \
+ sed -e 's/[[^0-9\.]].*$//' \
+ -e 's/\.\([[0-9]]\)$/.0\1/' \
+ -e 's/\.\([[0-9]][[0-9]]\)$/.0\1/' \
+ -e 's/\.\([[0-9]]\)\./0\1/; s/\.//g;'`"
+
+if test "$APR_VERSION_NUM" -ge "200000"; then
+
+ APU_BINDIR=""
+ APU_INCLUDES=""
+ APU_VERSION=""
+ APU_CONFIG=""
+
+else
+ sinclude(build/find_apu.m4)
+
+ echo $ac_n "${nl}Configuring Apache Portable Runtime Utility library...${nl}"
+
+ ifdef([APR_FIND_APU], [
+ APR_FIND_APU("$srcdir/apr-util", "./apr-util", 1, $APR_VERSION_MAJOR)
+ ], [AC_MSG_ERROR([APR-util required, but find_apu.m4 not present!])])
+
+ if test "$apu_found" = "no"; then
+ AC_MSG_ERROR([APR-util not found. Please read the documentation.])
+ fi
+
+ # Catch some misconfigurations:
+ case ${apr_found}.${apu_found} in
+ reconfig.yes)
+ AC_MSG_ERROR([Cannot use an external APR-util with the bundled APR])
+ ;;
+ yes.reconfig)
+ AC_MSG_ERROR([Cannot use an external APR with the bundled APR-util])
+ ;;
+ esac
+
+ if test "$apu_found" = "reconfig"; then
+ APR_SUBDIR_CONFIG(apr-util,
+ [--with-apr=../apr --prefix=$prefix --exec-prefix=$exec_prefix --libdir=$libdir --includedir=$includedir --bindir=$bindir],
+ [--enable-layout=*|\'--enable-layout=*])
+ dnl We must be the last to build and the first to be cleaned
+ SERF_BUILD_SRCLIB_DIRS="$SERF_BUILD_SRCLIB_DIRS apr-util"
+ SERF_CLEAN_SRCLIB_DIRS="apr-util $SERF_CLEAN_SRCLIB_DIRS"
+ fi
+
+ APR_ADDTO(LDFLAGS, `$apu_config --ldflags`)
+ APU_BINDIR=`$apu_config --bindir`
+ APU_INCLUDES=`$apu_config --includes`
+ APU_VERSION=`$apu_config --version`
+ APU_CONFIG="$APU_BINDIR/apu-`echo ${APU_VERSION} | sed 's,\..*,,'`-config"
+fi
+
+AC_SUBST(APU_BINDIR)
+AC_SUBST(APU_INCLUDES)
+AC_SUBST(APU_VERSION)
+AC_SUBST(APU_CONFIG)
+
+dnl In case we picked up CC and CPP from APR, get that info into the
+dnl config cache so that PCRE uses it. Otherwise, CC and CPP used for
+dnl PCRE and for our config tests will be whatever PCRE determines.
+AC_PROG_CC
+AC_PROG_CPP
+AC_PROG_INSTALL
+
+if test "x${cache_file}" = "x/dev/null"; then
+ # Likewise, ensure that CC and CPP are passed through to the pcre
+ # configure script iff caching is disabled (the autoconf 2.5x default).
+ export CC; export CPP
+fi
+
+echo $ac_n "Configuring Serf...${nl}"
+
+dnl Absolute source/build directory
+abs_srcdir=`(cd $srcdir && pwd)`
+abs_builddir=`pwd`
+
+dnl get our version information
+get_version="$abs_srcdir/build/get-version.sh"
+version_hdr="$abs_srcdir/serf.h"
+SERF_MAJOR_VERSION="`$get_version major $version_hdr SERF`"
+SERF_DOTTED_VERSION="`$get_version all $version_hdr SERF`"
+
+AC_SUBST(SERF_MAJOR_VERSION)
+AC_SUBST(SERF_DOTTED_VERSION)
+
+AC_SUBST(SERF_BUILD_SRCLIB_DIRS)
+AC_SUBST(SERF_CLEAN_SRCLIB_DIRS)
+
+AC_ARG_WITH(openssl,
+ APR_HELP_STRING([--with-openssl=PATH],[Path to OpenSSL (eg. /usr/local/ssl)]),
+[
+ if test "$withval" = "yes"; then
+ AC_MSG_ERROR([--with-openssl requires a path])
+ else
+ openssl_prefix=$withval
+
+ if test "x$openssl_prefix" != "x" -a ! -d "$openssl_prefix"; then
+ AC_MSG_ERROR('--with-openssl requires a path to a directory')
+ fi
+
+ APR_ADDTO(CPPFLAGS, "-I${openssl_prefix}/include")
+ if test -e "${openssl_prefix}/Makefile"; then
+ APR_ADDTO(LDFLAGS, "-L${openssl_prefix}")
+ APR_ADDTO(LDFLAGS, "-R${openssl_prefix}")
+ else
+ APR_ADDTO(LDFLAGS, "-L${openssl_prefix}/lib")
+ APR_ADDTO(LDFLAGS, "-R${openssl_prefix}/lib")
+ fi
+ fi
+])
+
+dnl Look for OpenSSL
+AC_CHECK_HEADER([openssl/opensslv.h], [],
+ [AC_MSG_ERROR([We require OpenSSL; try --with-openssl])])
+
+dnl CuTest requires libm on Solaris
+AC_SEARCH_LIBS(fabs, m)
+
+libs=""
+if test -n "$apu_config"; then
+ APR_ADDTO(libs, [`$apu_config --link-libtool --libs`])
+fi
+APR_ADDTO(libs, [`$apr_config --link-libtool --libs` $LIBS])
+
+APR_ADDTO(SERF_LIBS, [$libs])
+AC_SUBST(SERF_LIBS)
+
+APR_RESTORE_THE_ENVIRONMENT(CPPFLAGS, EXTRA_)
+APR_RESTORE_THE_ENVIRONMENT(CFLAGS, EXTRA_)
+APR_RESTORE_THE_ENVIRONMENT(CXXFLAGS, EXTRA_)
+APR_RESTORE_THE_ENVIRONMENT(LDFLAGS, EXTRA_)
+APR_RESTORE_THE_ENVIRONMENT(LIBS, EXTRA_)
+APR_RESTORE_THE_ENVIRONMENT(INCLUDES, EXTRA_)
+
+AC_CONFIG_FILES([Makefile serf.pc])
+AC_CONFIG_COMMANDS([mkdir-vpath],[make mkdir-vpath])
+
+AC_OUTPUT
diff --git a/context.c b/context.c
new file mode 100644
index 0000000..d4aa220
--- /dev/null
+++ b/context.c
@@ -0,0 +1,371 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_poll.h>
+#include <apr_version.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#include "serf_private.h"
+
+/* Older versions of APR do not have the APR_VERSION_AT_LEAST macro. Those
+ implementations are safe.
+
+ If the macro *is* defined, and we're on WIN32, and APR is version 1.4.0,
+ then we have a broken WSAPoll() implementation.
+
+ See serf_context_create_ex() below. */
+#if defined(APR_VERSION_AT_LEAST) && defined(WIN32)
+#if APR_VERSION_AT_LEAST(1,4,0)
+#define BROKEN_WSAPOLL
+#endif
+#endif
+
+
+/**
+ * Callback function (implements serf_progress_t). Takes a number of bytes
+ * read @a read and bytes written @a written, adds those to the total for this
+ * context and notifies an interested party (if any).
+ */
+void serf__context_progress_delta(
+ void *progress_baton,
+ apr_off_t read,
+ apr_off_t written)
+{
+ serf_context_t *ctx = progress_baton;
+
+ ctx->progress_read += read;
+ ctx->progress_written += written;
+
+ if (ctx->progress_func)
+ ctx->progress_func(ctx->progress_baton,
+ ctx->progress_read,
+ ctx->progress_written);
+}
+
+
+/* Check for dirty connections and update their pollsets accordingly. */
+static apr_status_t check_dirty_pollsets(serf_context_t *ctx)
+{
+ int i;
+
+ /* if we're not dirty, return now. */
+ if (!ctx->dirty_pollset) {
+ return APR_SUCCESS;
+ }
+
+ for (i = ctx->conns->nelts; i--; ) {
+ serf_connection_t *conn = GET_CONN(ctx, i);
+ apr_status_t status;
+
+ /* if this connection isn't dirty, skip it. */
+ if (!conn->dirty_conn) {
+ continue;
+ }
+
+ /* reset this connection's flag before we update. */
+ conn->dirty_conn = 0;
+
+ if ((status = serf__conn_update_pollset(conn)) != APR_SUCCESS)
+ return status;
+ }
+
+ /* reset our context flag now */
+ ctx->dirty_pollset = 0;
+
+ return APR_SUCCESS;
+}
+
+
+static apr_status_t pollset_add(void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton)
+{
+ serf_pollset_t *s = (serf_pollset_t*)user_baton;
+ pfd->client_data = serf_baton;
+ return apr_pollset_add(s->pollset, pfd);
+}
+
+static apr_status_t pollset_rm(void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton)
+{
+ serf_pollset_t *s = (serf_pollset_t*)user_baton;
+ pfd->client_data = serf_baton;
+ return apr_pollset_remove(s->pollset, pfd);
+}
+
+
+void serf_config_proxy(serf_context_t *ctx,
+ apr_sockaddr_t *address)
+{
+ ctx->proxy_address = address;
+}
+
+
+void serf_config_credentials_callback(serf_context_t *ctx,
+ serf_credentials_callback_t cred_cb)
+{
+ ctx->cred_cb = cred_cb;
+}
+
+
+void serf_config_authn_types(serf_context_t *ctx,
+ int authn_types)
+{
+ ctx->authn_types = authn_types;
+}
+
+
+serf_context_t *serf_context_create_ex(
+ void *user_baton,
+ serf_socket_add_t addf,
+ serf_socket_remove_t rmf,
+ apr_pool_t *pool)
+{
+ serf_context_t *ctx = apr_pcalloc(pool, sizeof(*ctx));
+
+ ctx->pool = pool;
+
+ if (user_baton != NULL) {
+ ctx->pollset_baton = user_baton;
+ ctx->pollset_add = addf;
+ ctx->pollset_rm = rmf;
+ }
+ else {
+ /* build the pollset with a (default) number of connections */
+ serf_pollset_t *ps = apr_pcalloc(pool, sizeof(*ps));
+#ifdef BROKEN_WSAPOLL
+ /* APR 1.4.x switched to using WSAPoll() on Win32, but it does not
+     * properly handle errors on non-blocking sockets (such as
+ * connecting to a server where no listener is active).
+ *
+ * So, sadly, we must force using select() on Win32.
+ *
+ * http://mail-archives.apache.org/mod_mbox/apr-dev/201105.mbox/%3CBANLkTin3rBCecCBRvzUA5B-14u-NWxR_Kg@mail.gmail.com%3E
+ */
+ (void) apr_pollset_create_ex(&ps->pollset, MAX_CONN, pool, 0,
+ APR_POLLSET_SELECT);
+#else
+ (void) apr_pollset_create(&ps->pollset, MAX_CONN, pool, 0);
+#endif
+ ctx->pollset_baton = ps;
+ ctx->pollset_add = pollset_add;
+ ctx->pollset_rm = pollset_rm;
+ }
+
+ /* default to a single connection since that is the typical case */
+ ctx->conns = apr_array_make(pool, 1, sizeof(serf_connection_t *));
+
+ /* Initialize progress status */
+ ctx->progress_read = 0;
+ ctx->progress_written = 0;
+
+ ctx->authn_types = SERF_AUTHN_ALL;
+
+ return ctx;
+}
+
+
+serf_context_t *serf_context_create(apr_pool_t *pool)
+{
+ return serf_context_create_ex(NULL, NULL, NULL, pool);
+}
+
+apr_status_t serf_context_prerun(serf_context_t *ctx)
+{
+ apr_status_t status = APR_SUCCESS;
+ if ((status = serf__open_connections(ctx)) != APR_SUCCESS)
+ return status;
+
+ if ((status = check_dirty_pollsets(ctx)) != APR_SUCCESS)
+ return status;
+ return status;
+}
+
+
+apr_status_t serf_event_trigger(
+ serf_context_t *s,
+ void *serf_baton,
+ const apr_pollfd_t *desc)
+{
+ apr_pollfd_t tdesc = { 0 };
+ apr_status_t status = APR_SUCCESS;
+ serf_io_baton_t *io = serf_baton;
+
+ if (io->type == SERF_IO_CONN) {
+ serf_connection_t *conn = io->u.conn;
+ serf_context_t *ctx = conn->ctx;
+
+ /* If this connection has already failed, return the error again, and try
+ * to remove it from the pollset again
+ */
+ if (conn->status) {
+ tdesc.desc_type = APR_POLL_SOCKET;
+ tdesc.desc.s = conn->skt;
+ tdesc.reqevents = conn->reqevents;
+ ctx->pollset_rm(ctx->pollset_baton,
+ &tdesc, conn);
+ return conn->status;
+ }
+ /* apr_pollset_poll() can return a conn multiple times... */
+ if ((conn->seen_in_pollset & desc->rtnevents) != 0 ||
+ (conn->seen_in_pollset & APR_POLLHUP) != 0) {
+ return APR_SUCCESS;
+ }
+
+ conn->seen_in_pollset |= desc->rtnevents;
+
+ if ((conn->status = serf__process_connection(conn,
+ desc->rtnevents)) != APR_SUCCESS) {
+
+ /* it's possible that the connection was already reset and thus the
+ socket cleaned up. */
+ if (conn->skt) {
+ tdesc.desc_type = APR_POLL_SOCKET;
+ tdesc.desc.s = conn->skt;
+ tdesc.reqevents = conn->reqevents;
+ ctx->pollset_rm(ctx->pollset_baton,
+ &tdesc, conn);
+ }
+ return conn->status;
+ }
+ }
+ else if (io->type == SERF_IO_LISTENER) {
+ serf_listener_t *l = io->u.listener;
+
+ status = serf__process_listener(l);
+
+ if (status) {
+ return status;
+ }
+ }
+ else if (io->type == SERF_IO_CLIENT) {
+ serf_incoming_t *c = io->u.client;
+
+ status = serf__process_client(c, desc->rtnevents);
+
+ if (status) {
+ return status;
+ }
+ }
+ return status;
+}
+
+
+apr_status_t serf_context_run(
+ serf_context_t *ctx,
+ apr_short_interval_time_t duration,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ apr_int32_t num;
+ const apr_pollfd_t *desc;
+ serf_pollset_t *ps = (serf_pollset_t*)ctx->pollset_baton;
+
+ if ((status = serf_context_prerun(ctx)) != APR_SUCCESS) {
+ return status;
+ }
+
+ if ((status = apr_pollset_poll(ps->pollset, duration, &num,
+ &desc)) != APR_SUCCESS) {
+ /* ### do we still need to dispatch stuff here?
+ ### look at the potential return codes. map to our defined
+ ### return values? ...
+ */
+ return status;
+ }
+
+ while (num--) {
+ serf_connection_t *conn = desc->client_data;
+
+ status = serf_event_trigger(ctx, conn, desc);
+ if (status) {
+ return status;
+ }
+
+ desc++;
+ }
+
+ return APR_SUCCESS;
+}
+
+
+void serf_context_set_progress_cb(
+ serf_context_t *ctx,
+ const serf_progress_t progress_func,
+ void *progress_baton)
+{
+ ctx->progress_func = progress_func;
+ ctx->progress_baton = progress_baton;
+}
+
+
+serf_bucket_t *serf_context_bucket_socket_create(
+ serf_context_t *ctx,
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *bucket = serf_bucket_socket_create(skt, allocator);
+
+ /* Use serf's default bytes read/written callback */
+ serf_bucket_socket_set_read_progress_cb(bucket,
+ serf__context_progress_delta,
+ ctx);
+
+ return bucket;
+}
+
+
+/* ### this really ought to go somewhere else, but... meh. */
+void serf_lib_version(int *major, int *minor, int *patch)
+{
+ *major = SERF_MAJOR_VERSION;
+ *minor = SERF_MINOR_VERSION;
+ *patch = SERF_PATCH_VERSION;
+}
+
+
+const char *serf_error_string(apr_status_t errcode)
+{
+ switch (errcode)
+ {
+ case SERF_ERROR_CLOSING:
+ return "The connection is closing";
+ case SERF_ERROR_REQUEST_LOST:
+ return "A request has been lost";
+ case SERF_ERROR_WAIT_CONN:
+ return "The connection is blocked, pending further action";
+ case SERF_ERROR_DECOMPRESSION_FAILED:
+ return "An error occurred during decompression";
+ case SERF_ERROR_BAD_HTTP_RESPONSE:
+ return "The server sent an improper HTTP response";
+ case SERF_ERROR_AUTHN_FAILED:
+ return "An error occurred during authentication";
+ case SERF_ERROR_AUTHN_NOT_SUPPORTED:
+ return "The requested authentication type(s) are not supported";
+ case SERF_ERROR_AUTHN_MISSING_ATTRIBUTE:
+ return "An authentication attribute is missing";
+ case SERF_ERROR_AUTHN_INITALIZATION_FAILED:
+ return "Initialization of an authentication type failed";
+
+ default:
+ return NULL;
+ }
+
+ /* NOTREACHED */
+}
diff --git a/design-guide.txt b/design-guide.txt
new file mode 100644
index 0000000..9e931d1
--- /dev/null
+++ b/design-guide.txt
@@ -0,0 +1,152 @@
+APACHE COMMONS: serf -*-indented-text-*-
+
+
+TOPICS
+
+ 1. Introduction
+ 2. Thread Safety
+ 3. Pool Usage
+ 4. Bucket Read Functions
+ 5. Versioning
+ 6. Bucket lifetimes
+
+
+-----------------------------------------------------------------------------
+
+1. INTRODUCTION
+
+This document details various design choices for the serf library. It
+is intended to be a guide for serf developers. Of course, these design
+principles, choices made, etc. are a good source of information for
+users of the serf library, too.
+
+
+-----------------------------------------------------------------------------
+
+2. THREAD SAFETY
+
+The serf library should contain no mutable globals, making it safe
+to use in a multi-threaded environment.
+
+Each "object" within the system does not need to be used from multiple
+threads at a time. Thus, they require no internal mutexes, and can
+disable mutexes within APR objects where applicable (e.g. pools that
+are created).
+
+The objects should not have any thread affinity (i.e. don't use
+thread-local storage). This enables an application to use external
+mutexes to guard entry to the serf objects, which then allows the
+objects to be used from multiple threads.
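+
+As an illustrative sketch only (not library code), an application that
+shares one context between threads could serialize access with an APR
+mutex around each call into serf; the "serf_guard", "ctx" and
+"scratch_pool" names below are hypothetical:
+
+    /* assumed to be created once with apr_thread_mutex_create() */
+    apr_thread_mutex_t *serf_guard;
+
+    apr_thread_mutex_lock(serf_guard);
+    status = serf_context_run(ctx, SERF_DURATION_NOBLOCK, scratch_pool);
+    apr_thread_mutex_unlock(serf_guard);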
+
+
+-----------------------------------------------------------------------------
+
+3. POOL USAGE
+
+For general information on the proper use of pools, please see:
+
+ http://cvs.apache.org/viewcvs/*checkout*/apr/docs/pool-design.html
+
+Within serf itself, the buckets introduce a significant issue related
+to pools. Since it is very possible to end up creating *many* buckets
+within a transaction, and that creation could be proportional to an
+incoming or outgoing data stream, a lot of care must be taken to avoid
+tying bucket allocations to pools. If a bucket allocated any internal
+memory against a pool, and if that bucket is created an unbounded
+number of times, then the pool memory could be exhausted.
+
+Thus, buckets are allocated using a custom allocator which allows the
+memory to be freed when that bucket is no longer needed. This
+contrasts with pools where the "free" operation occurs over a large
+set of objects, which is problematic if some are still in use.
+
+### need more explanation of strategy/solution ...
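+
+For illustration, the per-object allocation pattern looks roughly like
+the sketch below; the "my_data_t" type and the "md" variable are
+hypothetical, while the allocator calls are the ones used throughout the
+library:
+
+    serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(pool,
+                                                              NULL, NULL);
+    my_data_t *md = serf_bucket_mem_alloc(alloc, sizeof(*md));
+    /* ... use md while the owning object is alive ... */
+    serf_bucket_mem_free(alloc, md);  /* freed individually, not en masse */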
+
+
+-----------------------------------------------------------------------------
+
+4. BUCKET READ FUNCTIONS
+
+The bucket reading and peek functions must not block. Each read
+function should return (up to) the specified amount of data. If
+SERF_READ_ALL_AVAIL is passed, then the function should provide
+whatever is immediately available, without blocking.
+
+The peek function does not take a requested length because it is
+non-destructive. It is not possible to "read past" any barrier with a
+peek function. Thus, peek should operate like SERF_READ_ALL_AVAIL.
+
+The return values from the read functions should follow this general
+pattern:
+
+ APR_SUCCESS Some data was returned, and the caller can
+ immediately call the read function again to read
+ more data.
+
+ NOTE: when bucket behavior tracking is enabled,
+ then you must read more data from this bucket
+ before returning to the serf context loop. If a
+ bucket is not completely drained first, then it is
+ possible to deadlock (the server might not read
+ anything until you read everything it has already
+ given to you).
+
+ APR_EAGAIN Some data was returned, but no more is available
+ for now. The caller must "wait for a bit" or wait
+ for some event before attempting to read again
+ (basically, this simply means re-run the serf
+ context loop). Though it shouldn't be done, reading
+ again will, in all likelihood, return zero length
+ data and APR_EAGAIN again.
+
+ NOTE: when bucket behavior tracking is enabled,
+ then it is illegal to immediately read a bucket
+ again after it has returned APR_EAGAIN. You must
+ run the serf context loop again to (potentially)
+ fetch more data for the bucket.
+
+ APR_EOF Some data was returned, and this bucket has no more
+ data available and should not be read again. If you
+ happen to read it again, then it will return zero
+ length data and APR_EOF.
+
+ NOTE: when bucket behavior tracking is enabled,
+ then it is illegal to read this bucket ever again.
+
+ other An error has occurred. No data was returned. The
+ returned length is undefined.
+
+In the above paragraphs, when it says "some data was returned", note
+that this could be data of length zero.
+
+If a length of zero is returned, then the caller should not attempt to
+dereference the data pointer. It may be invalid. Note that there is no
+reason to dereference that pointer, since it doesn't point to any
+valid data.
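+
+A minimal caller-side sketch of this pattern (assuming "bucket" is some
+readable bucket, and ignoring what is done with the data):
+
+    while (1) {
+        const char *data;
+        apr_size_t len;
+        apr_status_t status = serf_bucket_read(bucket, SERF_READ_ALL_AVAIL,
+                                               &data, &len);
+
+        if (status && !APR_STATUS_IS_EOF(status)
+            && !APR_STATUS_IS_EAGAIN(status))
+            return status;        /* an error; no data was returned */
+
+        /* consume "len" bytes at "data" (len may be zero) */
+
+        if (APR_STATUS_IS_EOF(status))
+            break;                /* drained; do not read again */
+        if (APR_STATUS_IS_EAGAIN(status))
+            break;                /* re-run the serf context loop first */
+        /* APR_SUCCESS: more data may be immediately available; loop */
+    }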
+
+Any data returned by the bucket should live as long as the bucket, or
+until the next read or peek occurs.
+
+The read_bucket function falls into a very different pattern. See its
+doc string for more information.
+
+
+-----------------------------------------------------------------------------
+
+5. VERSIONING
+
+The serf project uses the APR versioning guidelines described here:
+
+ http://apr.apache.org/versioning.html
+
+
+-----------------------------------------------------------------------------
+
+6. BUCKET LIFETIMES
+
+### flesh out. basically: if you hold a bucket pointer, then you own
+### it. passing a bucket into another transfers ownership. use barrier
+### buckets to limit destruction of a tree of buckets.
+
+
+-----------------------------------------------------------------------------
diff --git a/incoming.c b/incoming.c
new file mode 100644
index 0000000..40562fa
--- /dev/null
+++ b/incoming.c
@@ -0,0 +1,176 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_poll.h>
+#include <apr_version.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#include "serf_private.h"
+
+static apr_status_t read_from_client(serf_incoming_t *client)
+{
+ return APR_ENOTIMPL;
+}
+
+static apr_status_t write_to_client(serf_incoming_t *client)
+{
+ return APR_ENOTIMPL;
+}
+
+apr_status_t serf__process_client(serf_incoming_t *client, apr_int16_t events)
+{
+ apr_status_t rv;
+ if ((events & APR_POLLIN) != 0) {
+ rv = read_from_client(client);
+ if (rv) {
+ return rv;
+ }
+ }
+
+ if ((events & APR_POLLHUP) != 0) {
+ return APR_ECONNRESET;
+ }
+
+ if ((events & APR_POLLERR) != 0) {
+ return APR_EGENERAL;
+ }
+
+ if ((events & APR_POLLOUT) != 0) {
+ rv = write_to_client(client);
+ if (rv) {
+ return rv;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t serf__process_listener(serf_listener_t *l)
+{
+ apr_status_t rv;
+ apr_socket_t *in;
+ apr_pool_t *p;
+ /* THIS IS NOT OPTIMAL */
+ apr_pool_create(&p, l->pool);
+
+ rv = apr_socket_accept(&in, l->skt, p);
+
+ if (rv) {
+ apr_pool_destroy(p);
+ return rv;
+ }
+
+ rv = l->accept_func(l->ctx, l, l->accept_baton, in, p);
+
+ if (rv) {
+ apr_pool_destroy(p);
+ return rv;
+ }
+
+ return rv;
+}
+
+
+apr_status_t serf_incoming_create(
+ serf_incoming_t **client,
+ serf_context_t *ctx,
+ apr_socket_t *insock,
+ void *request_baton,
+ serf_incoming_request_cb_t request,
+ apr_pool_t *pool)
+{
+ apr_status_t rv;
+ serf_incoming_t *ic = apr_palloc(pool, sizeof(*ic));
+
+ ic->ctx = ctx;
+ ic->baton.type = SERF_IO_CLIENT;
+ ic->baton.u.client = ic;
+ ic->request_baton = request_baton;
+ ic->request = request;
+ ic->skt = insock;
+ ic->desc.desc_type = APR_POLL_SOCKET;
+ ic->desc.desc.s = ic->skt;
+ ic->desc.reqevents = APR_POLLIN;
+
+ rv = ctx->pollset_add(ctx->pollset_baton,
+ &ic->desc, &ic->baton);
+ *client = ic;
+
+ return rv;
+}
+
+
+apr_status_t serf_listener_create(
+ serf_listener_t **listener,
+ serf_context_t *ctx,
+ const char *host,
+ apr_uint16_t port,
+ void *accept_baton,
+ serf_accept_client_t accept,
+ apr_pool_t *pool)
+{
+ apr_sockaddr_t *sa;
+ apr_status_t rv;
+ serf_listener_t *l = apr_palloc(pool, sizeof(*l));
+
+ l->ctx = ctx;
+ l->baton.type = SERF_IO_LISTENER;
+ l->baton.u.listener = l;
+ l->accept_func = accept;
+ l->accept_baton = accept_baton;
+
+ apr_pool_create(&l->pool, pool);
+
+ rv = apr_sockaddr_info_get(&sa, host, APR_UNSPEC, port, 0, l->pool);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_create(&l->skt, sa->family,
+ SOCK_STREAM,
+#if APR_MAJOR_VERSION > 0
+ APR_PROTO_TCP,
+#endif
+ l->pool);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_opt_set(l->skt, APR_SO_REUSEADDR, 1);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_bind(l->skt, sa);
+ if (rv)
+ return rv;
+
+ rv = apr_socket_listen(l->skt, 5);
+ if (rv)
+ return rv;
+
+ l->desc.desc_type = APR_POLL_SOCKET;
+ l->desc.desc.s = l->skt;
+ l->desc.reqevents = APR_POLLIN;
+
+ rv = ctx->pollset_add(ctx->pollset_baton,
+ &l->desc, &l->baton);
+ if (rv)
+ return rv;
+
+ *listener = l;
+
+ return APR_SUCCESS;
+}
diff --git a/outgoing.c b/outgoing.c
new file mode 100644
index 0000000..53fac0a
--- /dev/null
+++ b/outgoing.c
@@ -0,0 +1,1431 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr_pools.h>
+#include <apr_poll.h>
+#include <apr_version.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+#include "serf_private.h"
+
+/* cleanup for sockets */
+static apr_status_t clean_skt(void *data)
+{
+ serf_connection_t *conn = data;
+ apr_status_t status = APR_SUCCESS;
+
+ if (conn->skt) {
+ status = apr_socket_close(conn->skt);
+ conn->skt = NULL;
+ }
+
+ return status;
+}
+
+static apr_status_t clean_resp(void *data)
+{
+ serf_request_t *request = data;
+
+ /* The request's RESPOOL is being cleared. */
+
+ /* If the response has allocated some buckets, then destroy them (since
+ the bucket may hold resources other than memory in RESPOOL). Also
+ make sure to set their fields to NULL so connection closure does
+ not attempt to free them again. */
+ if (request->resp_bkt) {
+ serf_bucket_destroy(request->resp_bkt);
+ request->resp_bkt = NULL;
+ }
+ if (request->req_bkt) {
+ serf_bucket_destroy(request->req_bkt);
+ request->req_bkt = NULL;
+ }
+
+ /* ### should we worry about debug stuff, like that performed in
+ ### destroy_request()? should we worry about calling req->handler
+ ### to notify this "cancellation" due to pool clearing? */
+
+ /* This pool just got cleared/destroyed. Don't try to destroy the pool
+ (again) when the request is canceled. */
+ request->respool = NULL;
+
+ return APR_SUCCESS;
+}
+
+/* cleanup for conns */
+static apr_status_t clean_conn(void *data)
+{
+ serf_connection_t *conn = data;
+
+ serf_connection_close(conn);
+
+ return APR_SUCCESS;
+}
+
+/* Update the pollset for this connection. We tweak the pollset based on
+ * whether we want to read and/or write, given conditions within the
+ * connection. If the connection is not (yet) in the pollset, then it
+ * will be added.
+ */
+apr_status_t serf__conn_update_pollset(serf_connection_t *conn)
+{
+ serf_context_t *ctx = conn->ctx;
+ apr_status_t status;
+ apr_pollfd_t desc = { 0 };
+
+ if (!conn->skt) {
+ return APR_SUCCESS;
+ }
+
+ /* Remove the socket from the poll set. */
+ desc.desc_type = APR_POLL_SOCKET;
+ desc.desc.s = conn->skt;
+ desc.reqevents = conn->reqevents;
+
+ status = ctx->pollset_rm(ctx->pollset_baton,
+ &desc, conn);
+ if (status && !APR_STATUS_IS_NOTFOUND(status))
+ return status;
+
+ /* Now put it back in with the correct read/write values. */
+ desc.reqevents = APR_POLLHUP | APR_POLLERR;
+ if (conn->requests) {
+ /* If there are any outstanding events, then we want to read. */
+ /* ### not true. we only want to read IF we have sent some data */
+ desc.reqevents |= APR_POLLIN;
+
+ /* If the connection has unwritten data, or there are any requests
+ * that still have buckets to write out, then we want to write.
+ */
+ if (conn->vec_len)
+ desc.reqevents |= APR_POLLOUT;
+ else {
+ serf_request_t *request = conn->requests;
+
+ if ((conn->probable_keepalive_limit &&
+ conn->completed_requests > conn->probable_keepalive_limit) ||
+ (conn->max_outstanding_requests &&
+ conn->completed_requests - conn->completed_responses >=
+ conn->max_outstanding_requests)) {
+ /* we wouldn't try to write anyway right now. */
+ }
+ else {
+ while (request != NULL && request->req_bkt == NULL &&
+ request->written)
+ request = request->next;
+ if (request != NULL)
+ desc.reqevents |= APR_POLLOUT;
+ }
+ }
+ }
+
+ /* If we can have async responses, always look for something to read. */
+ if (conn->async_responses) {
+ desc.reqevents |= APR_POLLIN;
+ }
+
+ /* save our reqevents, so we can pass it in to remove later. */
+ conn->reqevents = desc.reqevents;
+
+ /* Note: even if we don't want to read/write this socket, we still
+ * want to poll it for hangups and errors.
+ */
+ return ctx->pollset_add(ctx->pollset_baton,
+ &desc, &conn->baton);
+}
+
+#ifdef SERF_DEBUG_BUCKET_USE
+
+/* Make sure all response buckets were drained. */
+static void check_buckets_drained(serf_connection_t *conn)
+{
+ serf_request_t *request = conn->requests;
+
+ for ( ; request ; request = request->next ) {
+ if (request->resp_bkt != NULL) {
+ /* ### crap. can't do this. this allocator may have un-drained
+ * ### REQUEST buckets.
+ */
+ /* serf_debug__entered_loop(request->resp_bkt->allocator); */
+ /* ### for now, pretend we closed the conn (resets the tracking) */
+ serf_debug__closed_conn(request->resp_bkt->allocator);
+ }
+ }
+}
+
+#endif
+
+/* Create and connect sockets for any connections which don't have them
+ * yet. This is the core of our lazy-connect behavior.
+ */
+apr_status_t serf__open_connections(serf_context_t *ctx)
+{
+ int i;
+
+ for (i = ctx->conns->nelts; i--; ) {
+ serf_connection_t *conn = GET_CONN(ctx, i);
+ apr_status_t status;
+ apr_socket_t *skt;
+ apr_sockaddr_t *serv_addr;
+
+ conn->seen_in_pollset = 0;
+
+ if (conn->skt != NULL) {
+#ifdef SERF_DEBUG_BUCKET_USE
+ check_buckets_drained(conn);
+#endif
+ continue;
+ }
+
+ /* Delay opening until we have something to deliver! */
+ if (conn->requests == NULL) {
+ continue;
+ }
+
+ apr_pool_clear(conn->skt_pool);
+ apr_pool_cleanup_register(conn->skt_pool, conn, clean_skt, clean_skt);
+
+ /* Do we have to connect to a proxy server? */
+ if (ctx->proxy_address)
+ serv_addr = ctx->proxy_address;
+ else
+ serv_addr = conn->address;
+
+ if ((status = apr_socket_create(&skt, serv_addr->family,
+ SOCK_STREAM,
+#if APR_MAJOR_VERSION > 0
+ APR_PROTO_TCP,
+#endif
+ conn->skt_pool)) != APR_SUCCESS)
+ return status;
+
+ /* Set the socket to be non-blocking */
+ if ((status = apr_socket_timeout_set(skt, 0)) != APR_SUCCESS)
+ return status;
+
+ /* Disable Nagle's algorithm */
+ if ((status = apr_socket_opt_set(skt,
+ APR_TCP_NODELAY, 1)) != APR_SUCCESS)
+ return status;
+
+ /* Configured. Store it into the connection now. */
+ conn->skt = skt;
+
+ /* Now that the socket is set up, let's connect it. This should
+ * return immediately.
+ */
+ if ((status = apr_socket_connect(skt,
+ serv_addr)) != APR_SUCCESS) {
+ if (!APR_STATUS_IS_EINPROGRESS(status))
+ return status;
+ }
+
+ /* Flag our pollset as dirty now that we have a new socket. */
+ conn->dirty_conn = 1;
+ ctx->dirty_pollset = 1;
+
+ /* If the authentication was already started on another connection,
+ prepare this connection (it might be possible to skip some
+ part of the handshaking). */
+ if (ctx->proxy_address) {
+ if (conn->ctx->proxy_authn_info.scheme)
+ conn->ctx->proxy_authn_info.scheme->init_conn_func(407, conn,
+ conn->pool);
+ }
+
+ if (conn->ctx->authn_info.scheme)
+ conn->ctx->authn_info.scheme->init_conn_func(401, conn,
+ conn->pool);
+
+ /* Does this connection require a SSL tunnel over the proxy? */
+ if (ctx->proxy_address && strcmp(conn->host_info.scheme, "https") == 0)
+ serf__ssltunnel_connect(conn);
+ else
+ conn->state = SERF_CONN_CONNECTED;
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t no_more_writes(serf_connection_t *conn,
+ serf_request_t *request)
+{
+ /* Note that we should hold new requests until we open our new socket. */
+ conn->state = SERF_CONN_CLOSING;
+
+ /* We can take the *next* request in our list and assume it hasn't
+ * been written yet and 'save' it for the new socket.
+ */
+ conn->hold_requests = request->next;
+ conn->hold_requests_tail = conn->requests_tail;
+ request->next = NULL;
+ conn->requests_tail = request;
+
+ /* Clear our iovec. */
+ conn->vec_len = 0;
+
+ /* Update the pollset to know we don't want to write on this socket any
+ * more.
+ */
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ return APR_SUCCESS;
+}
+
+/* Read the 'Connection' header from the response. Return SERF_ERROR_CLOSING if
+ * the header contains value 'close' indicating the server is closing the
+ * connection right after this response.
+ * Otherwise returns APR_SUCCESS.
+ */
+static apr_status_t is_conn_closing(serf_bucket_t *response)
+{
+ serf_bucket_t *hdrs;
+ const char *val;
+
+ hdrs = serf_bucket_response_get_headers(response);
+ val = serf_bucket_headers_get(hdrs, "Connection");
+ if (val && strcasecmp("close", val) == 0)
+ {
+ return SERF_ERROR_CLOSING;
+ }
+
+ return APR_SUCCESS;
+}
+
+static void link_requests(serf_request_t **list, serf_request_t **tail,
+ serf_request_t *request)
+{
+ if (*list == NULL) {
+ *list = request;
+ *tail = request;
+ }
+ else {
+ (*tail)->next = request;
+ *tail = request;
+ }
+}
+
+static apr_status_t destroy_request(serf_request_t *request)
+{
+ serf_connection_t *conn = request->conn;
+
+ /* The request and response buckets are no longer needed,
+ nor is the request's pool. */
+ if (request->resp_bkt) {
+ serf_debug__closed_conn(request->resp_bkt->allocator);
+ serf_bucket_destroy(request->resp_bkt);
+ request->resp_bkt = NULL;
+ }
+ if (request->req_bkt) {
+ serf_debug__closed_conn(request->req_bkt->allocator);
+ serf_bucket_destroy(request->req_bkt);
+ request->req_bkt = NULL;
+ }
+
+ serf_debug__bucket_alloc_check(request->allocator);
+ if (request->respool) {
+ /* ### unregister the pool cleanup for self? */
+ apr_pool_destroy(request->respool);
+ }
+
+ serf_bucket_mem_free(conn->allocator, request);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t cancel_request(serf_request_t *request,
+ serf_request_t **list,
+ int notify_request)
+{
+ /* If we haven't run setup, then we won't have a handler to call. */
+ if (request->handler && notify_request) {
+ /* We actually don't care what the handler returns.
+ * We have bigger matters at hand.
+ */
+ (*request->handler)(request, NULL, request->handler_baton,
+ request->respool);
+ }
+
+ if (*list == request) {
+ *list = request->next;
+ }
+ else {
+ serf_request_t *scan = *list;
+
+ while (scan->next && scan->next != request)
+ scan = scan->next;
+
+ if (scan->next) {
+ scan->next = scan->next->next;
+ }
+ }
+
+ return destroy_request(request);
+}
+
+static apr_status_t remove_connection(serf_context_t *ctx,
+ serf_connection_t *conn)
+{
+ apr_pollfd_t desc = { 0 };
+
+ desc.desc_type = APR_POLL_SOCKET;
+ desc.desc.s = conn->skt;
+ desc.reqevents = conn->reqevents;
+
+ return ctx->pollset_rm(ctx->pollset_baton,
+ &desc, conn);
+}
+
+static void destroy_ostream(serf_connection_t *conn)
+{
+ if (conn->ostream_head != NULL) {
+ serf_bucket_destroy(conn->ostream_head);
+ conn->ostream_head = NULL;
+ conn->ostream_tail = NULL;
+ }
+}
+
+/* A socket was closed, inform the application. */
+static void handle_conn_closed(serf_connection_t *conn, apr_status_t status)
+{
+ (*conn->closed)(conn, conn->closed_baton, status,
+ conn->pool);
+}
+
+static apr_status_t reset_connection(serf_connection_t *conn,
+ int requeue_requests)
+{
+ serf_context_t *ctx = conn->ctx;
+ apr_status_t status;
+ serf_request_t *old_reqs, *held_reqs, *held_reqs_tail;
+
+ conn->probable_keepalive_limit = conn->completed_responses;
+ conn->completed_requests = 0;
+ conn->completed_responses = 0;
+
+ old_reqs = conn->requests;
+ held_reqs = conn->hold_requests;
+ held_reqs_tail = conn->hold_requests_tail;
+
+ if (conn->state == SERF_CONN_CLOSING) {
+ conn->hold_requests = NULL;
+ conn->hold_requests_tail = NULL;
+ }
+
+ conn->requests = NULL;
+ conn->requests_tail = NULL;
+
+ while (old_reqs) {
+ /* If we haven't started to write the connection, bring it over
+ * unchanged to our new socket. Otherwise, call the cancel function.
+ */
+ if (requeue_requests && !old_reqs->written) {
+ serf_request_t *req = old_reqs;
+ old_reqs = old_reqs->next;
+ req->next = NULL;
+ link_requests(&conn->requests, &conn->requests_tail, req);
+ }
+ else {
+ cancel_request(old_reqs, &old_reqs, requeue_requests);
+ }
+ }
+
+ if (conn->requests_tail) {
+ conn->requests_tail->next = held_reqs;
+ }
+ else {
+ conn->requests = held_reqs;
+ }
+ if (held_reqs_tail) {
+ conn->requests_tail = held_reqs_tail;
+ }
+
+ if (conn->skt != NULL) {
+ remove_connection(ctx, conn);
+ status = apr_socket_close(conn->skt);
+ if (conn->closed != NULL) {
+ handle_conn_closed(conn, status);
+ }
+ conn->skt = NULL;
+ }
+
+ if (conn->stream != NULL) {
+ serf_bucket_destroy(conn->stream);
+ conn->stream = NULL;
+ }
+
+ destroy_ostream(conn);
+
+ /* Don't try to resume any writes */
+ conn->vec_len = 0;
+
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ conn->state = SERF_CONN_INIT;
+
+ conn->status = APR_SUCCESS;
+
+ /* Let our context know that we've 'reset' the socket already. */
+ conn->seen_in_pollset |= APR_POLLHUP;
+
+ /* Found the connection. Closed it. All done. */
+ return APR_SUCCESS;
+}
+
+static apr_status_t socket_writev(serf_connection_t *conn)
+{
+ apr_size_t written;
+ apr_status_t status;
+
+ status = apr_socket_sendv(conn->skt, conn->vec,
+ conn->vec_len, &written);
+
+ /* did we write everything? */
+ if (written) {
+ apr_size_t len = 0;
+ int i;
+
+ for (i = 0; i < conn->vec_len; i++) {
+ len += conn->vec[i].iov_len;
+ if (written < len) {
+ if (i) {
+ memmove(conn->vec, &conn->vec[i],
+ sizeof(struct iovec) * (conn->vec_len - i));
+ conn->vec_len -= i;
+ }
+ conn->vec[0].iov_base = (char *)conn->vec[0].iov_base + (conn->vec[0].iov_len - (len - written));
+ conn->vec[0].iov_len = len - written;
+ break;
+ }
+ }
+ if (len == written) {
+ conn->vec_len = 0;
+ }
+
+ /* Log progress information */
+ serf__context_progress_delta(conn->ctx, 0, written);
+ }
+
+ return status;
+}
+
+static apr_status_t detect_eof(void *baton, serf_bucket_t *aggregate_bucket)
+{
+ serf_connection_t *conn = baton;
+ conn->hit_eof = 1;
+ return APR_EAGAIN;
+}
+
+static apr_status_t do_conn_setup(serf_connection_t *conn)
+{
+ apr_status_t status;
+ serf_bucket_t *ostream;
+
+ if (conn->ostream_head == NULL) {
+ conn->ostream_head = serf_bucket_aggregate_create(conn->allocator);
+ }
+
+ if (conn->ostream_tail == NULL) {
+ conn->ostream_tail = serf__bucket_stream_create(conn->allocator,
+ detect_eof,
+ conn);
+ }
+
+ ostream = conn->ostream_tail;
+
+ status = (*conn->setup)(conn->skt,
+ &conn->stream,
+ &ostream,
+ conn->setup_baton,
+ conn->pool);
+ if (status) {
+ /* extra destroy here since it wasn't added to the head bucket yet. */
+ serf_bucket_destroy(conn->ostream_tail);
+ destroy_ostream(conn);
+ return status;
+ }
+
+ serf_bucket_aggregate_append(conn->ostream_head,
+ ostream);
+
+ return status;
+}
+
+/* Set up the input and output stream buckets.
+ When a tunnel over an http proxy is needed, create a socket bucket and
+ empty aggregate bucket for sending and receiving unencrypted requests
+ over the socket.
+
+ After the tunnel is there, or no tunnel was needed, ask the application
+ to create the input and output buckets, which should take care of the
+ [en/de]cryption.
+*/
+
+static apr_status_t prepare_conn_streams(serf_connection_t *conn,
+ serf_bucket_t **istream,
+ serf_bucket_t **ostreamt,
+ serf_bucket_t **ostreamh)
+{
+ apr_status_t status;
+
+ /* Do we need a SSL tunnel first? */
+ if (conn->state == SERF_CONN_CONNECTED) {
+ /* If the connection does not have an associated bucket, then
+ * call the setup callback to get one.
+ */
+ if (conn->stream == NULL) {
+ status = do_conn_setup(conn);
+ if (status) {
+ return status;
+ }
+ }
+ *ostreamt = conn->ostream_tail;
+ *ostreamh = conn->ostream_head;
+ *istream = conn->stream;
+ } else {
+ /* SSL tunnel needed and not set up yet, get a direct unencrypted
+ stream for this socket */
+ if (conn->stream == NULL) {
+ *istream = serf_bucket_socket_create(conn->skt,
+ conn->allocator);
+ }
+ /* Don't create the ostream bucket chain including the ssl_encrypt
+ bucket yet. This ensures the CONNECT request is sent unencrypted
+ to the proxy. */
+ *ostreamt = *ostreamh = conn->ssltunnel_ostream;
+ }
+
+ return APR_SUCCESS;
+}
+
+/* write data out to the connection */
+static apr_status_t write_to_connection(serf_connection_t *conn)
+{
+ serf_request_t *request = conn->requests;
+
+ if (conn->probable_keepalive_limit &&
+ conn->completed_requests > conn->probable_keepalive_limit) {
+
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+
+ /* backoff for now. */
+ return APR_SUCCESS;
+ }
+
+ /* Find a request that has data which needs to be delivered. */
+ while (request != NULL &&
+ request->req_bkt == NULL && request->written)
+ request = request->next;
+
+ /* assert: request != NULL || conn->vec_len */
+
+ /* Keep reading and sending until we run out of stuff to read, or
+ * writing would block.
+ */
+ while (1) {
+ int stop_reading = 0;
+ apr_status_t status;
+ apr_status_t read_status;
+ serf_bucket_t *ostreamt, *ostreamh;
+ int max_outstanding_requests = conn->max_outstanding_requests;
+
+ /* If we're setting up an ssl tunnel, we can't send real requests
+ just yet, as they need to be encrypted and our encrypt buckets
+ aren't created yet as we still need to read the unencrypted
+ response of the CONNECT request. */
+ if (conn->state != SERF_CONN_CONNECTED)
+ max_outstanding_requests = 1;
+
+ if (max_outstanding_requests &&
+ conn->completed_requests -
+ conn->completed_responses >= max_outstanding_requests) {
+ /* backoff for now. */
+ return APR_SUCCESS;
+ }
+
+ /* If we have unwritten data, then write what we can. */
+ while (conn->vec_len) {
+ status = socket_writev(conn);
+
+ /* If the write would have blocked, then we're done. Don't try
+ * to write anything else to the socket.
+ */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return APR_SUCCESS;
+ if (APR_STATUS_IS_EPIPE(status))
+ return no_more_writes(conn, request);
+ if (status)
+ return status;
+ }
+ /* ### can we have a short write, yet no EAGAIN? a short write
+ ### would imply unwritten_len > 0 ... */
+ /* assert: unwritten_len == 0. */
+
+ /* We may need to move forward to a request which has something
+ * to write.
+ */
+ while (request != NULL &&
+ request->req_bkt == NULL && request->written)
+ request = request->next;
+
+ if (request == NULL) {
+ /* No more requests (with data) are registered with the
+ * connection. Let's update the pollset so that we don't
+ * try to write to this socket again.
+ */
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ return APR_SUCCESS;
+ }
+
+ status = prepare_conn_streams(conn, &conn->stream, &ostreamt, &ostreamh);
+ if (status) {
+ return status;
+ }
+
+ if (request->req_bkt == NULL) {
+ /* Now that we are about to serve the request, allocate a pool. */
+ apr_pool_create(&request->respool, conn->pool);
+ request->allocator = serf_bucket_allocator_create(request->respool,
+ NULL, NULL);
+ apr_pool_cleanup_register(request->respool, request,
+ clean_resp, clean_resp);
+
+ /* Fill in the rest of the values for the request. */
+ read_status = request->setup(request, request->setup_baton,
+ &request->req_bkt,
+ &request->acceptor,
+ &request->acceptor_baton,
+ &request->handler,
+ &request->handler_baton,
+ request->respool);
+
+ if (read_status) {
+ /* Something bad happened. Propagate any errors. */
+ return read_status;
+ }
+
+ request->written = 1;
+ serf_bucket_aggregate_append(ostreamt, request->req_bkt);
+ }
+
+ /* ### optimize at some point by using read_for_sendfile */
+ read_status = serf_bucket_read_iovec(ostreamh,
+ SERF_READ_ALL_AVAIL,
+ IOV_MAX,
+ conn->vec,
+ &conn->vec_len);
+
+ if (!conn->hit_eof) {
+ if (APR_STATUS_IS_EAGAIN(read_status) ||
+ read_status == SERF_ERROR_WAIT_CONN) {
+ /* We read some stuff, but should not try to read again. */
+ stop_reading = 1;
+
+ /* ### we should avoid looking for writability for a while so
+ ### that (hopefully) something will appear in the bucket so
+ ### we can actually write something. otherwise, we could
+ ### end up in a CPU spin: socket wants something, but we
+ ### don't have anything (and keep returning EAGAIN)
+ */
+ }
+ else if (read_status && !APR_STATUS_IS_EOF(read_status)) {
+ /* Something bad happened. Propagate any errors. */
+ return read_status;
+ }
+ }
+
+ /* If we got some data, then deliver it. */
+ /* ### what to do if we got no data?? is that a problem? */
+ if (conn->vec_len > 0) {
+ status = socket_writev(conn);
+
+ /* If we can't write any more, or an error occurred, then
+ * we're done here.
+ */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return APR_SUCCESS;
+ if (APR_STATUS_IS_EPIPE(status))
+ return no_more_writes(conn, request);
+ if (APR_STATUS_IS_ECONNRESET(status)) {
+ return no_more_writes(conn, request);
+ }
+ if (status)
+ return status;
+ }
+
+ if (read_status == SERF_ERROR_WAIT_CONN) {
+ stop_reading = 1;
+ }
+ else if (read_status && conn->hit_eof && conn->vec_len == 0) {
+ /* If we hit the end of the request bucket and all of its data has
+ * been written, then clear it out to signify that we're done
+ * sending the request. On the next iteration through this loop:
+ * - if there are remaining bytes they will be written, and as the
+ * request bucket will be completely read it will be destroyed then.
+ * - we'll see if there are other requests that need to be sent
+ * ("pipelining").
+ */
+ conn->hit_eof = 0;
+ serf_bucket_destroy(request->req_bkt);
+ request->req_bkt = NULL;
+
+ /* If our connection has async responses enabled, we're not
+ * going to get a reply back, so kill the request.
+ */
+ if (conn->async_responses) {
+ conn->requests = request->next;
+ destroy_request(request);
+ }
+
+ conn->completed_requests++;
+
+ if (conn->probable_keepalive_limit &&
+ conn->completed_requests > conn->probable_keepalive_limit) {
+ /* backoff for now. */
+ stop_reading = 1;
+ }
+ }
+
+ if (stop_reading) {
+ return APR_SUCCESS;
+ }
+ }
+ /* NOTREACHED */
+}
+
+/* A response message was received from the server, so call
+ the handler as specified on the original request. */
+static apr_status_t handle_response(serf_request_t *request,
+ apr_pool_t *pool)
+{
+ apr_status_t status = APR_SUCCESS;
+ int consumed_response = 0;
+
+ /* Only enable the new authentication framework if the program has
+ * registered an authentication credential callback.
+ *
+ * This permits older Serf apps to still handle authentication
+ * themselves by not registering credential callbacks.
+ */
+ if (request->conn->ctx->cred_cb) {
+ status = serf__handle_auth_response(&consumed_response,
+ request,
+ request->resp_bkt,
+ request->handler_baton,
+ pool);
+
+ /* If there was an error reading the response (maybe there wasn't
+ enough data available), don't bother passing the response to the
+ application.
+
+ If the authentication was tried, but failed, pass the response
+ to the application, maybe it can do better. */
+ if (APR_STATUS_IS_EOF(status) ||
+ APR_STATUS_IS_EAGAIN(status)) {
+ return status;
+ }
+ }
+
+ if (!consumed_response) {
+ return (*request->handler)(request,
+ request->resp_bkt,
+ request->handler_baton,
+ pool);
+ }
+
+ return status;
+}
+
+/* An async response message was received from the server. */
+static apr_status_t handle_async_response(serf_connection_t *conn,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+
+ if (conn->current_async_response == NULL) {
+ conn->current_async_response =
+ (*conn->async_acceptor)(NULL, conn->stream,
+ conn->async_acceptor_baton, pool);
+ }
+
+ status = (*conn->async_handler)(NULL, conn->current_async_response,
+ conn->async_handler_baton, pool);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ serf_bucket_destroy(conn->current_async_response);
+ conn->current_async_response = NULL;
+ status = APR_SUCCESS;
+ }
+
+ return status;
+}
+
+/* read data from the connection */
+static apr_status_t read_from_connection(serf_connection_t *conn)
+{
+ apr_status_t status;
+ apr_pool_t *tmppool;
+ int close_connection = FALSE;
+
+ /* Whatever is coming in on the socket corresponds to the first request
+ * on our chain.
+ */
+ serf_request_t *request = conn->requests;
+
+ /* assert: request != NULL */
+
+ if ((status = apr_pool_create(&tmppool, conn->pool)) != APR_SUCCESS)
+ goto error;
+
+ /* Invoke response handlers until we have no more work. */
+ while (1) {
+ serf_bucket_t *dummy1, *dummy2;
+
+ apr_pool_clear(tmppool);
+
+ /* Only interested in the input stream here. */
+ status = prepare_conn_streams(conn, &conn->stream, &dummy1, &dummy2);
+ if (status) {
+ goto error;
+ }
+
+ /* We have a different codepath when we can have async responses. */
+ if (conn->async_responses) {
+ /* TODO What about socket errors? */
+ status = handle_async_response(conn, tmppool);
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ status = APR_SUCCESS;
+ goto error;
+ }
+ if (status) {
+ goto error;
+ }
+ continue;
+ }
+
+ /* We are reading a response for a request we haven't
+ * written yet!
+ *
+ * This shouldn't normally happen EXCEPT:
+ *
+ * 1) when the other end has closed the socket and we're
+ * pending an EOF return.
+ * 2) Doing the initial SSL handshake - we'll get EAGAIN
+ * as the SSL buckets will hide the handshake from us
+ * but not return any data.
+ *
+ * In these cases, we should not receive any actual user data.
+ *
+ * If we see an EOF (due to an expired timeout), we'll reset the
+ * connection and open a new one.
+ */
+ if (request->req_bkt || !request->written) {
+ const char *data;
+ apr_size_t len;
+
+ status = serf_bucket_read(conn->stream, SERF_READ_ALL_AVAIL,
+ &data, &len);
+
+ if (!status && len) {
+ status = APR_EGENERAL;
+ }
+ else if (APR_STATUS_IS_EOF(status)) {
+ reset_connection(conn, 1);
+ status = APR_SUCCESS;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+ status = APR_SUCCESS;
+ }
+
+ goto error;
+ }
+
+ /* If the request doesn't have a response bucket, then call the
+ * acceptor to get one created.
+ */
+ if (request->resp_bkt == NULL) {
+ request->resp_bkt = (*request->acceptor)(request, conn->stream,
+ request->acceptor_baton,
+ tmppool);
+ apr_pool_clear(tmppool);
+ }
+
+ status = handle_response(request, tmppool);
+
+ /* Some systems will not generate a HUP poll event so we have to
+ * handle the ECONNRESET issue here.
+ */
+ if (APR_STATUS_IS_ECONNRESET(status) ||
+ status == SERF_ERROR_REQUEST_LOST) {
+ reset_connection(conn, 1);
+ status = APR_SUCCESS;
+ goto error;
+ }
+
+ /* If our response handler says it can't do anything more, we now
+ * treat that as a success.
+ */
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ status = APR_SUCCESS;
+ goto error;
+ }
+
+ /* If we received APR_SUCCESS, run this loop again. */
+ if (!status) {
+ continue;
+ }
+
+ close_connection = is_conn_closing(request->resp_bkt);
+
+ if (!APR_STATUS_IS_EOF(status) &&
+ close_connection != SERF_ERROR_CLOSING) {
+ /* Whether success, or an error, there is no more to do unless
+ * this request has been completed.
+ */
+ goto error;
+ }
+
+ /* The request has been fully-delivered, and the response has
+ * been fully-read. Remove it from our queue and loop to read
+ * another response.
+ */
+ conn->requests = request->next;
+
+ destroy_request(request);
+
+ request = conn->requests;
+
+ /* If we're truly empty, update our tail. */
+ if (request == NULL) {
+ conn->requests_tail = NULL;
+ }
+
+ conn->completed_responses++;
+
+ /* We have to rebuild the pollset since completed_responses has changed. */
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+
+ /* This means that we're being advised that the connection is done. */
+ if (close_connection == SERF_ERROR_CLOSING) {
+ reset_connection(conn, 1);
+ if (APR_STATUS_IS_EOF(status))
+ status = APR_SUCCESS;
+ goto error;
+ }
+
+ /* The server is suddenly deciding to serve more responses than we've
+ * seen before.
+ *
+ * Let our requests go.
+ */
+ if (conn->probable_keepalive_limit &&
+ conn->completed_responses > conn->probable_keepalive_limit) {
+ conn->probable_keepalive_limit = 0;
+ }
+
+ /* If we just ran out of requests or have unwritten requests, then
+ * update the pollset. We don't want to read from this socket any
+ * more. We are definitely done with this loop, too.
+ */
+ if (request == NULL || !request->written) {
+ conn->dirty_conn = 1;
+ conn->ctx->dirty_pollset = 1;
+ status = APR_SUCCESS;
+ goto error;
+ }
+ }
+
+error:
+ apr_pool_destroy(tmppool);
+ return status;
+}
+
+/* process all events on the connection */
+apr_status_t serf__process_connection(serf_connection_t *conn,
+ apr_int16_t events)
+{
+ apr_status_t status;
+
+ /* POLLHUP/ERR should come after POLLIN so if there's an error message or
+ * the like sitting on the connection, we give the app a chance to read
+ * it before we trigger a reset condition.
+ */
+ if ((events & APR_POLLIN) != 0) {
+ if ((status = read_from_connection(conn)) != APR_SUCCESS)
+ return status;
+
+ /* If we decided to reset our connection, return now as we don't
+ * want to write.
+ */
+ if ((conn->seen_in_pollset & APR_POLLHUP) != 0) {
+ return APR_SUCCESS;
+ }
+ }
+ if ((events & APR_POLLHUP) != 0) {
+ /* The connection got reset by the server. On Windows this can happen
+ when all data is read, so just clean up the connection and open
+ a new one. */
+ return reset_connection(conn, 1);
+ }
+ if ((events & APR_POLLERR) != 0) {
+ /* We might be talking to a buggy HTTP server that doesn't
+ * do lingering-close. (httpd < 2.1.8 does this.)
+ *
+ * See:
+ *
+ * http://issues.apache.org/bugzilla/show_bug.cgi?id=35292
+ */
+ if (conn->completed_requests && !conn->probable_keepalive_limit) {
+ return reset_connection(conn, 1);
+ }
+ return APR_EGENERAL;
+ }
+ if ((events & APR_POLLOUT) != 0) {
+ if ((status = write_to_connection(conn)) != APR_SUCCESS)
+ return status;
+ }
+ return APR_SUCCESS;
+}
+
+serf_connection_t *serf_connection_create(
+ serf_context_t *ctx,
+ apr_sockaddr_t *address,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool)
+{
+ serf_connection_t *conn = apr_pcalloc(pool, sizeof(*conn));
+
+ conn->ctx = ctx;
+ conn->status = APR_SUCCESS;
+ conn->address = address;
+ conn->setup = setup;
+ conn->setup_baton = setup_baton;
+ conn->closed = closed;
+ conn->closed_baton = closed_baton;
+ conn->pool = pool;
+ conn->allocator = serf_bucket_allocator_create(pool, NULL, NULL);
+ conn->stream = NULL;
+ conn->ostream_head = NULL;
+ conn->ostream_tail = NULL;
+ conn->baton.type = SERF_IO_CONN;
+ conn->baton.u.conn = conn;
+ conn->hit_eof = 0;
+ conn->state = SERF_CONN_INIT;
+
+ /* Create a subpool for our connection. */
+ apr_pool_create(&conn->skt_pool, conn->pool);
+
+ /* register a cleanup */
+ apr_pool_cleanup_register(conn->pool, conn, clean_conn, apr_pool_cleanup_null);
+
+ /* Add the connection to the context. */
+ *(serf_connection_t **)apr_array_push(ctx->conns) = conn;
+
+ return conn;
+}
+
+apr_status_t serf_connection_create2(
+ serf_connection_t **conn,
+ serf_context_t *ctx,
+ apr_uri_t host_info,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ serf_connection_t *c;
+ apr_sockaddr_t *host_address;
+
+ /* Parse the url, store the address of the server. */
+ status = apr_sockaddr_info_get(&host_address,
+ host_info.hostname,
+ APR_UNSPEC, host_info.port, 0, pool);
+ if (status)
+ return status;
+
+ c = serf_connection_create(ctx, host_address, setup, setup_baton,
+ closed, closed_baton, pool);
+
+ /* We're not interested in the path following the hostname. */
+ c->host_url = apr_uri_unparse(c->pool,
+ &host_info,
+ APR_URI_UNP_OMITPATHINFO);
+ c->host_info = host_info;
+
+ *conn = c;
+
+ return status;
+}
+
+apr_status_t serf_connection_reset(
+ serf_connection_t *conn)
+{
+ return reset_connection(conn, 0);
+}
+
+
+apr_status_t serf_connection_close(
+ serf_connection_t *conn)
+{
+ int i;
+ serf_context_t *ctx = conn->ctx;
+ apr_status_t status;
+
+ for (i = ctx->conns->nelts; i--; ) {
+ serf_connection_t *conn_seq = GET_CONN(ctx, i);
+
+ if (conn_seq == conn) {
+ while (conn->requests) {
+ serf_request_cancel(conn->requests);
+ }
+ if (conn->skt != NULL) {
+ remove_connection(ctx, conn);
+ status = apr_socket_close(conn->skt);
+ if (conn->closed != NULL) {
+ handle_conn_closed(conn, status);
+ }
+ conn->skt = NULL;
+ }
+ if (conn->stream != NULL) {
+ serf_bucket_destroy(conn->stream);
+ conn->stream = NULL;
+ }
+
+ /* Remove the connection from the context. We don't want to
+ * deal with it any more.
+ */
+ if (i < ctx->conns->nelts - 1) {
+ /* move later connections over this one. */
+ memmove(
+ &GET_CONN(ctx, i),
+ &GET_CONN(ctx, i + 1),
+ (ctx->conns->nelts - i - 1) * sizeof(serf_connection_t *));
+ }
+ --ctx->conns->nelts;
+
+ /* Found the connection. Closed it. All done. */
+ return APR_SUCCESS;
+ }
+ }
+
+ /* We didn't find the specified connection. */
+ /* ### doc talks about this w.r.t poll structures. use something else? */
+ return APR_NOTFOUND;
+}
+
+
+void serf_connection_set_max_outstanding_requests(
+ serf_connection_t *conn,
+ unsigned int max_requests)
+{
+ conn->max_outstanding_requests = max_requests;
+}
+
+
+void serf_connection_set_async_responses(
+ serf_connection_t *conn,
+ serf_response_acceptor_t acceptor,
+ void *acceptor_baton,
+ serf_response_handler_t handler,
+ void *handler_baton)
+{
+ conn->async_responses = 1;
+ conn->async_acceptor = acceptor;
+ conn->async_acceptor_baton = acceptor_baton;
+ conn->async_handler = handler;
+ conn->async_handler_baton = handler_baton;
+}
+
+
+serf_request_t *serf_connection_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton)
+{
+ serf_request_t *request;
+
+ request = serf_bucket_mem_alloc(conn->allocator, sizeof(*request));
+ request->conn = conn;
+ request->setup = setup;
+ request->setup_baton = setup_baton;
+ request->handler = NULL;
+ request->respool = NULL;
+ request->req_bkt = NULL;
+ request->resp_bkt = NULL;
+ request->priority = 0;
+ request->written = 0;
+ request->next = NULL;
+
+ /* Link the request to the end of the request chain. */
+ if (conn->state == SERF_CONN_CLOSING) {
+ link_requests(&conn->hold_requests, &conn->hold_requests_tail, request);
+ }
+ else {
+ link_requests(&conn->requests, &conn->requests_tail, request);
+
+ /* Ensure our pollset becomes writable in context run */
+ conn->ctx->dirty_pollset = 1;
+ conn->dirty_conn = 1;
+ }
+
+ return request;
+}
+
+
+serf_request_t *serf_connection_priority_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton)
+{
+ serf_request_t *request;
+ serf_request_t *iter, *prev;
+
+ request = serf_bucket_mem_alloc(conn->allocator, sizeof(*request));
+ request->conn = conn;
+ request->setup = setup;
+ request->setup_baton = setup_baton;
+ request->handler = NULL;
+ request->respool = NULL;
+ request->req_bkt = NULL;
+ request->resp_bkt = NULL;
+ request->priority = 1;
+ request->written = 0;
+ request->next = NULL;
+
+ /* Link the new request after the last written request, but before all
+ upcoming requests. */
+ if (conn->state == SERF_CONN_CLOSING) {
+ iter = conn->hold_requests;
+ }
+ else {
+ iter = conn->requests;
+ }
+ prev = NULL;
+
+ /* Find a request that has data which needs to be delivered. */
+ while (iter != NULL && iter->req_bkt == NULL && iter->written) {
+ prev = iter;
+ iter = iter->next;
+ }
+
+ /* Advance to the next non-priority request */
+ while (iter != NULL && iter->priority) {
+ prev = iter;
+ iter = iter->next;
+ }
+
+ if (prev) {
+ request->next = iter;
+ prev->next = request;
+ } else {
+ request->next = iter;
+ if (conn->state == SERF_CONN_CLOSING) {
+ conn->hold_requests = request;
+ }
+ else {
+ conn->requests = request;
+ }
+ }
+
+ if (conn->state != SERF_CONN_CLOSING) {
+ /* Ensure our pollset becomes writable in context run */
+ conn->ctx->dirty_pollset = 1;
+ conn->dirty_conn = 1;
+ }
+
+ return request;
+}
+
+
+apr_status_t serf_request_cancel(serf_request_t *request)
+{
+ return cancel_request(request, &request->conn->requests, 0);
+}
+
+
+apr_pool_t *serf_request_get_pool(const serf_request_t *request)
+{
+ return request->respool;
+}
+
+
+serf_bucket_alloc_t *serf_request_get_alloc(
+ const serf_request_t *request)
+{
+ return request->allocator;
+}
+
+
+serf_connection_t *serf_request_get_conn(
+ const serf_request_t *request)
+{
+ return request->conn;
+}
+
+
+void serf_request_set_handler(
+ serf_request_t *request,
+ const serf_response_handler_t handler,
+ const void **handler_baton)
+{
+ request->handler = handler;
+ request->handler_baton = handler_baton;
+}
+
+
+serf_bucket_t *serf_request_bucket_request_create(
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator)
+{
+ serf_bucket_t *req_bkt, *hdrs_bkt;
+ serf_connection_t *conn = request->conn;
+ serf_context_t *ctx = conn->ctx;
+
+ req_bkt = serf_bucket_request_create(method, uri, body, allocator);
+ hdrs_bkt = serf_bucket_request_get_headers(req_bkt);
+
+ /* Proxy? */
+ if (ctx->proxy_address && conn->host_url)
+ serf_bucket_request_set_root(req_bkt, conn->host_url);
+
+ if (conn->host_info.hostinfo)
+ serf_bucket_headers_setn(hdrs_bkt, "Host",
+ conn->host_info.hostinfo);
+
+ /* Setup server authorization headers */
+ if (ctx->authn_info.scheme)
+ ctx->authn_info.scheme->setup_request_func(401, conn, method, uri,
+ hdrs_bkt);
+
+ /* Setup proxy authorization headers */
+ if (ctx->proxy_authn_info.scheme)
+ ctx->proxy_authn_info.scheme->setup_request_func(407, conn, method,
+ uri, hdrs_bkt);
+
+ return req_bkt;
+}
diff --git a/serf.h b/serf.h
new file mode 100644
index 0000000..6428e08
--- /dev/null
+++ b/serf.h
@@ -0,0 +1,1086 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SERF_H
+#define SERF_H
+
+/**
+ * @file serf.h
+ * @brief Main serf header file
+ */
+
+#include <apr.h>
+#include <apr_errno.h>
+#include <apr_allocator.h>
+#include <apr_pools.h>
+#include <apr_network_io.h>
+#include <apr_time.h>
+#include <apr_poll.h>
+#include <apr_uri.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Forward declare some structures */
+typedef struct serf_context_t serf_context_t;
+
+typedef struct serf_bucket_t serf_bucket_t;
+typedef struct serf_bucket_type_t serf_bucket_type_t;
+typedef struct serf_bucket_alloc_t serf_bucket_alloc_t;
+
+typedef struct serf_connection_t serf_connection_t;
+typedef struct serf_listener_t serf_listener_t;
+typedef struct serf_incoming_t serf_incoming_t;
+typedef struct serf_incoming_request_t serf_incoming_request_t;
+
+typedef struct serf_request_t serf_request_t;
+
+
+/**
+ * @defgroup serf high-level constructs
+ * @ingroup serf
+ * @{
+ */
+
+/**
+ * Serf-specific error codes
+ */
+#define SERF_ERROR_RANGE 100
+
+/* This code is for when this is the last response on this connection:
+ * i.e. do not send any more requests on this connection or expect
+ * any more responses.
+ */
+#define SERF_ERROR_CLOSING (APR_OS_START_USERERR + SERF_ERROR_RANGE + 1)
+/* This code is for when the connection terminated before the request
+ * could be processed on the other side.
+ */
+#define SERF_ERROR_REQUEST_LOST (APR_OS_START_USERERR + SERF_ERROR_RANGE + 2)
+/* This code is for when the connection is blocked - we cannot proceed
+ * until something happens - generally due to SSL negotiation-like behavior
+ * where a write() is blocked until a read() is processed.
+ */
+#define SERF_ERROR_WAIT_CONN (APR_OS_START_USERERR + SERF_ERROR_RANGE + 3)
+/* This code is for when something went wrong during deflating compressed
+ * data e.g. a CRC error. */
+#define SERF_ERROR_DECOMPRESSION_FAILED (APR_OS_START_USERERR + \
+ SERF_ERROR_RANGE + 4)
+/* This code is for when a response received from a http server is not in
+ * http-compliant syntax. */
+#define SERF_ERROR_BAD_HTTP_RESPONSE (APR_OS_START_USERERR + \
+ SERF_ERROR_RANGE + 5)
+
+/* General authentication related errors */
+#define SERF_ERROR_AUTHN_FAILED (APR_OS_START_USERERR + SERF_ERROR_RANGE + 90)
+
+/* None of the available authn mechanisms for the request are supported */
+#define SERF_ERROR_AUTHN_NOT_SUPPORTED (APR_OS_START_USERERR + SERF_ERROR_RANGE + 91)
+
+/* Authn was requested by the server but the header lacked some attribute */
+#define SERF_ERROR_AUTHN_MISSING_ATTRIBUTE (APR_OS_START_USERERR + SERF_ERROR_RANGE + 92)
+
+/* Authentication handler initialization related errors */
+#define SERF_ERROR_AUTHN_INITALIZATION_FAILED (APR_OS_START_USERERR +\
+ SERF_ERROR_RANGE + 93)
+
+/* This macro groups errors potentially raised when reading a http response. */
+#define SERF_BAD_RESPONSE_ERROR(status) ((status) \
+ && ((SERF_ERROR_DECOMPRESSION_FAILED == (status)) \
+ ||(SERF_ERROR_BAD_HTTP_RESPONSE == (status))))
+
+/**
+ * Return a string that describes the specified error code.
+ *
+ * If the error code is not one of the above Serf error codes, then
+ * NULL will be returned.
+ *
+ * Note regarding lifetime: the string is a statically-allocated constant
+ */
+const char *serf_error_string(apr_status_t errcode);
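+
+/* Illustrative sketch (not part of the API): fall back to apr_strerror()
+ * for status codes that serf_error_string() does not know about.
+ *
+ *   char buf[128];
+ *   const char *msg = serf_error_string(status);
+ *   if (msg == NULL)
+ *       msg = apr_strerror(status, buf, sizeof(buf));
+ */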
+
+
+/**
+ * Create a new context for serf operations.
+ *
+ * A serf context defines a control loop which processes multiple
+ * connections simultaneously.
+ *
+ * The context will be allocated within @a pool.
+ */
+serf_context_t *serf_context_create(
+ apr_pool_t *pool);
+
+/**
+ * Callback function. Add a socket to the externally managed poll set.
+ *
+ * Both @a pfd and @a serf_baton should be used when calling serf_event_trigger
+ * later.
+ */
+typedef apr_status_t (*serf_socket_add_t)(
+ void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton);
+
+/**
+ * Callback function. Remove the socket, identified by both @a pfd and
+ * @a serf_baton from the externally managed poll set.
+ */
+typedef apr_status_t (*serf_socket_remove_t)(
+ void *user_baton,
+ apr_pollfd_t *pfd,
+ void *serf_baton);
+
+/* Create a new context for serf operations.
+ *
+ * Use this function to make serf not use its internal control loop, but
+ * instead rely on an external event loop. Serf will use the @a addf and @a rmf
+ * callbacks to notify of any event on a connection. The @a user_baton will be
+ * passed through the addf and rmf callbacks.
+ *
+ * The context will be allocated within @a pool.
+ */
+serf_context_t *serf_context_create_ex(
+ void *user_baton,
+ serf_socket_add_t addf,
+ serf_socket_remove_t rmf,
+ apr_pool_t *pool);
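+
+/* Illustrative sketch of the add/remove callbacks, assuming the
+ * application keeps its own apr_pollset_t in a hypothetical app_baton_t
+ * passed as @a user_baton. The serf baton is stashed in client_data so it
+ * can be handed back to serf_event_trigger() when the descriptor fires.
+ *
+ *   static apr_status_t app_pollset_add(void *user_baton,
+ *                                       apr_pollfd_t *pfd,
+ *                                       void *serf_baton)
+ *   {
+ *       app_baton_t *app = user_baton;
+ *       pfd->client_data = serf_baton;
+ *       return apr_pollset_add(app->pollset, pfd);
+ *   }
+ *
+ *   static apr_status_t app_pollset_remove(void *user_baton,
+ *                                          apr_pollfd_t *pfd,
+ *                                          void *serf_baton)
+ *   {
+ *       app_baton_t *app = user_baton;
+ *       return apr_pollset_remove(app->pollset, pfd);
+ *   }
+ */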
+
+/**
+ * Make serf process events on a connection, identified by both @a pfd and
+ * @a serf_baton.
+ *
+ * Any outbound data is delivered, and incoming data is made available to
+ * the associated response handlers and their buckets.
+ *
+ * If any data is processed (incoming or outgoing), then this function will
+ * return with APR_SUCCESS.
+ */
+apr_status_t serf_event_trigger(
+ serf_context_t *s,
+ void *serf_baton,
+ const apr_pollfd_t *pfd);
+
+/** @see serf_context_run should not block at all. */
+#define SERF_DURATION_NOBLOCK 0
+/** @see serf_context_run should run for (nearly) "forever". */
+#define SERF_DURATION_FOREVER 2000000000 /* approx 2^31 */
+
+/**
+ * Run the main networking control loop.
+ *
+ * The set of connections defined by the serf context @a ctx are processed.
+ * Any outbound data is delivered, and incoming data is made available to
+ * the associated response handlers and their buckets. This function will
+ * block on the network for no longer than @a duration microseconds.
+ *
+ * If any data is processed (incoming or outgoing), then this function will
+ * return with APR_SUCCESS. Typically, the caller will just want to call it
+ * again to continue processing data.
+ *
+ * If no activity occurs within the specified timeout duration, then
+ * APR_TIMEUP is returned.
+ *
+ * All temporary allocations will be made in @a pool.
+ */
+apr_status_t serf_context_run(
+ serf_context_t *ctx,
+ apr_short_interval_time_t duration,
+ apr_pool_t *pool);
+
+
+apr_status_t serf_context_prerun(
+ serf_context_t *ctx);
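+
+/* A minimal driver loop, for illustration only. APR_TIMEUP means no
+ * activity occurred within the duration, so the application simply calls
+ * the function again; any other non-zero status is treated as fatal here.
+ * The "ctx" and "scratch_pool" names, and the pool strategy, are up to
+ * the application.
+ *
+ *   while (1) {
+ *       apr_status_t status = serf_context_run(ctx, SERF_DURATION_FOREVER,
+ *                                              scratch_pool);
+ *       if (APR_STATUS_IS_TIMEUP(status))
+ *           continue;
+ *       if (status)
+ *           break;
+ *   }
+ */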
+
+/**
+ * Callback function for progress information. @a progress indicates cumulative
+ * number of bytes read or written, for the whole context.
+ */
+typedef void (*serf_progress_t)(
+ void *progress_baton,
+ apr_off_t read,
+ apr_off_t write);
+
+/**
+ * Sets the progress callback function. @a progress_func will be called every
+ * time bytes are read from or written to a socket.
+ */
+void serf_context_set_progress_cb(
+ serf_context_t *ctx,
+ const serf_progress_t progress_func,
+ void *progress_baton);
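+
+/* Illustrative sketch of a progress callback (stdio assumed); the totals
+ * passed in are cumulative for the whole context:
+ *
+ *   static void app_progress(void *baton, apr_off_t read, apr_off_t written)
+ *   {
+ *       printf("progress: %" APR_OFF_T_FMT " bytes read, "
+ *              "%" APR_OFF_T_FMT " bytes written\n", read, written);
+ *   }
+ *
+ *   serf_context_set_progress_cb(ctx, app_progress, NULL);
+ */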
+
+/** @} */
+
+/**
+ * @defgroup serf connections and requests
+ * @ingroup serf
+ * @{
+ */
+
+/**
+ * When a connection is established, the application needs to wrap some
+ * buckets around @a skt to enable serf to process incoming responses. This
+ * is the control point for assembling connection-level processing logic
+ * around the given socket.
+ *
+ * The @a setup_baton is the baton established at connection creation time.
+ *
+ * This callback corresponds to reading from the server. Since this is an
+ * on-demand activity, we use a callback. The corresponding write operation
+ * is based on the @see serf_request_deliver function, where the application
+ * can assemble the appropriate bucket(s) before delivery.
+ *
+ * The returned bucket should live at least as long as the connection itself.
+ * It is assumed that an appropriate allocator is passed in @a setup_baton.
+ * ### we may want to create a connection-level allocator and pass that
+ * ### along. however, that allocator would *only* be used for this
+ * ### callback. it may be wasteful to create a per-conn allocator, so this
+ * ### baton-based, app-responsible form might be best.
+ *
+ * Responsibility for the buckets is passed to the serf library. They will be
+ * destroyed when the connection is closed.
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+typedef apr_status_t (*serf_connection_setup_t)(
+ apr_socket_t *skt,
+ serf_bucket_t **read_bkt,
+ serf_bucket_t **write_bkt,
+ void *setup_baton,
+ apr_pool_t *pool);
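+
+/* Illustrative sketch of a setup callback for a plain (non-SSL)
+ * connection. The app_baton_t type and its ctx/bkt_alloc fields are
+ * hypothetical; serf_context_bucket_socket_create() wraps the socket and
+ * wires up serf's default progress accounting. The write bucket is left
+ * untouched here, since no encryption wrapper is needed.
+ *
+ *   static apr_status_t conn_setup(apr_socket_t *skt,
+ *                                  serf_bucket_t **read_bkt,
+ *                                  serf_bucket_t **write_bkt,
+ *                                  void *setup_baton,
+ *                                  apr_pool_t *pool)
+ *   {
+ *       app_baton_t *app = setup_baton;
+ *
+ *       *read_bkt = serf_context_bucket_socket_create(app->ctx, skt,
+ *                                                     app->bkt_alloc);
+ *       return APR_SUCCESS;
+ *   }
+ */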
+
+/**
+ * ### need to update docco w.r.t socket. became "stream" recently.
+ * ### the stream does not have a barrier, this callback should generally
+ * ### add a barrier around the stream before incorporating it into a
+ * ### response bucket stack.
+ * ### should serf add the barrier automatically to protect its data
+ * ### structure? i.e. the passed bucket becomes owned rather than
+ * ### borrowed. that might suit overall semantics better.
+ * Accept an incoming response for @a request, and its @a socket. A bucket
+ * for the response should be constructed and returned. This is the control
+ * point for assembling the appropriate wrapper buckets around the socket to
+ * enable processing of the incoming response.
+ *
+ * The @a acceptor_baton is the baton provided when the specified request
+ * was created.
+ *
+ * The request's pool and bucket allocator should be used for any allocations
+ * that need to live for the duration of the response. Care should be taken
+ * to bound the amount of memory stored in this pool -- to ensure that
+ * allocations are not proportional to the amount of data in the response.
+ *
+ * Responsibility for the bucket is passed to the serf library. It will be
+ * destroyed when the response has been fully read (the bucket returns an
+ * APR_EOF status from its read functions).
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+/* ### do we need to return an error? */
+typedef serf_bucket_t * (*serf_response_acceptor_t)(
+ serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool);
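+/* Illustrative sketch only: a typical serf_response_acceptor_t that wraps
+ * the incoming stream in a barrier bucket (so the response bucket does not
+ * destroy serf's own stream) and then in a response bucket. This mirrors
+ * common usage; it is not the only valid implementation.
+ *
+ *   static serf_bucket_t *accept_response(serf_request_t *request,
+ *                                         serf_bucket_t *stream,
+ *                                         void *acceptor_baton,
+ *                                         apr_pool_t *pool)
+ *   {
+ *       serf_bucket_alloc_t *bkt_alloc = serf_request_get_alloc(request);
+ *       serf_bucket_t *c;
+ *
+ *       c = serf_bucket_barrier_create(stream, bkt_alloc);
+ *       return serf_bucket_response_create(c, bkt_alloc);
+ *   }
+ */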
+
+/**
+ * Notification callback for when a connection closes.
+ *
+ * This callback is used to inform an application that the @a conn
+ * connection has been (abnormally) closed. The @a closed_baton is the
+ * baton provided when the connection was first opened. The reason for
+ * closure is given in @a why, and will be APR_SUCCESS if the application
+ * requested closure (by clearing the pool used to allocate this
+ * connection or calling serf_connection_close).
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+typedef void (*serf_connection_closed_t)(
+ serf_connection_t *conn,
+ void *closed_baton,
+ apr_status_t why,
+ apr_pool_t *pool);
+
+/**
+ * Response data has arrived and should be processed.
+ *
+ * Whenever response data for @a request arrives (initially, or continued data
+ * arrival), this handler is invoked. The response data is available in the
+ * @a response bucket. The @a handler_baton is passed along from the baton
+ * provided by the request setup callback (@see serf_request_setup_t).
+ *
+ * The handler MUST process data from the @a response bucket until the
+ * bucket's read function states it would block (see APR_STATUS_IS_EAGAIN).
+ * The handler is invoked only when new data arrives. If no further data
+ * arrives, and the handler does not process all available data, then the
+ * system can deadlock on the data that has been read but not yet processed.
+ *
+ * The handler should return APR_EOF when the response has been fully read.
+ * If calling the handler again would block, APR_EAGAIN should be returned.
+ * If the handler should be invoked again, simply return APR_SUCCESS.
+ *
+ * Note: if the connection closed (at the request of the application, or
+ * because of an (abnormal) termination) while a request is being delivered,
+ * or before a response arrives, then @a response will be NULL. This is the
+ * signal that the request was not delivered properly, and no further
+ * response should be expected (this callback will not be invoked again).
+ * If a request is injected into the connection (during this callback's
+ * execution, or otherwise), then the connection will be reopened.
+ *
+ * All temporary allocations should be made in @a pool.
+ */
+typedef apr_status_t (*serf_response_handler_t)(
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool);
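+/* Illustrative sketch only: a response handler that drains the bucket as
+ * required by the contract above. Error handling and the use of the data
+ * (here it is simply discarded) are application-specific.
+ *
+ *   static apr_status_t handle_response(serf_request_t *request,
+ *                                       serf_bucket_t *response,
+ *                                       void *handler_baton,
+ *                                       apr_pool_t *pool)
+ *   {
+ *       const char *data;
+ *       apr_size_t len;
+ *       apr_status_t status;
+ *
+ *       if (!response)
+ *           return APR_EGENERAL;    (request was not delivered; see above)
+ *
+ *       while (1) {
+ *           status = serf_bucket_read(response, SERF_READ_ALL_AVAIL,
+ *                                     &data, &len);
+ *           if (SERF_BUCKET_READ_ERROR(status))
+ *               return status;
+ *           (process "len" bytes at "data" here)
+ *           if (APR_STATUS_IS_EOF(status) || APR_STATUS_IS_EAGAIN(status))
+ *               return status;      (APR_EOF: done; APR_EAGAIN: call again)
+ *       }
+ *   }
+ */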
+
+/**
+ * Callback function to be implemented by the application, so that serf
+ * can handle server and proxy authentication.
+ * code = 401 (server) or 407 (proxy).
+ * baton = the baton passed to serf_context_run.
+ * authn_type = one of "Basic", "Digest".
+ */
+typedef apr_status_t (*serf_credentials_callback_t)(
+ char **username,
+ char **password,
+ serf_request_t *request, void *baton,
+ int code, const char *authn_type,
+ const char *realm,
+ apr_pool_t *pool);
+
+/**
+ * Create a new connection associated with the @a ctx serf context.
+ *
+ * A connection will be created to (eventually) connect to the address
+ * specified by @a address. The address must live at least as long as
+ * @a pool (thus, as long as the connection object).
+ *
+ * The connection object will be allocated within @a pool. Clearing or
+ * destroying this pool will close the connection, and terminate any
+ * outstanding requests or responses.
+ *
+ * When the connection is closed (upon request or because of an error),
+ * then the @a closed callback is invoked, and @a closed_baton is passed.
+ *
+ * ### doc on setup(_baton). tweak below comment re: acceptor.
+ * NULL may be passed for @a acceptor and @a closed; default implementations
+ * will be used.
+ *
+ * Note: the connection is not made immediately. It will be opened on
+ * the next call to @see serf_context_run.
+ */
+serf_connection_t *serf_connection_create(
+ serf_context_t *ctx,
+ apr_sockaddr_t *address,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool);
+
+/**
+ * Create a new connection associated with the @a ctx serf context.
+ *
+ * A connection will be created to (eventually) connect to the address
+ * specified by @a address. The address must live at least as long as
+ * @a pool (thus, as long as the connection object).
+ *
+ * The host address will be looked up based on the hostname in @a host_info.
+ *
+ * The connection object will be allocated within @a pool. Clearing or
+ * destroying this pool will close the connection, and terminate any
+ * outstanding requests or responses.
+ *
+ * When the connection is closed (upon request or because of an error),
+ * then the @a closed callback is invoked, and @a closed_baton is passed.
+ *
+ * ### doc on setup(_baton). tweak below comment re: acceptor.
+ * NULL may be passed for @a acceptor and @a closed; default implementations
+ * will be used.
+ *
+ * Note: the connection is not made immediately. It will be opened on
+ * the next call to @see serf_context_run.
+ */
+apr_status_t serf_connection_create2(
+ serf_connection_t **conn,
+ serf_context_t *ctx,
+ apr_uri_t host_info,
+ serf_connection_setup_t setup,
+ void *setup_baton,
+ serf_connection_closed_t closed,
+ void *closed_baton,
+ apr_pool_t *pool);
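+/* Illustrative sketch only: resolving an address with APR and creating a
+ * connection. The conn_setup and closed_connection callbacks and the
+ * app_baton variable are hypothetical application code.
+ *
+ *   apr_sockaddr_t *address;
+ *   serf_connection_t *conn;
+ *   apr_status_t status;
+ *
+ *   status = apr_sockaddr_info_get(&address, "example.com", APR_UNSPEC,
+ *                                  80, 0, pool);
+ *   if (status == APR_SUCCESS)
+ *       conn = serf_connection_create(ctx, address,
+ *                                     conn_setup, &app_baton,
+ *                                     closed_connection, &app_baton,
+ *                                     pool);
+ */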
+
+
+typedef apr_status_t (*serf_accept_client_t)(
+ serf_context_t *ctx,
+ serf_listener_t *l,
+ void *accept_baton,
+ apr_socket_t *insock,
+ apr_pool_t *pool);
+
+apr_status_t serf_listener_create(
+ serf_listener_t **listener,
+ serf_context_t *ctx,
+ const char *host,
+ apr_uint16_t port,
+ void *accept_baton,
+ serf_accept_client_t accept_func,
+ apr_pool_t *pool);
+
+typedef apr_status_t (*serf_incoming_request_cb_t)(
+ serf_context_t *ctx,
+ serf_incoming_request_t *req,
+ void *request_baton,
+ apr_pool_t *pool);
+
+apr_status_t serf_incoming_create(
+ serf_incoming_t **client,
+ serf_context_t *ctx,
+ apr_socket_t *insock,
+ void *request_baton,
+ serf_incoming_request_cb_t request,
+ apr_pool_t *pool);
+
+
+
+
+/**
+ * Reset the connection, re-opening the socket.
+ */
+apr_status_t serf_connection_reset(
+ serf_connection_t *conn);
+
+/**
+ * Close the connection associated with @a conn and cancel all pending requests.
+ *
+ * The closed callback passed to serf_connection_create() will be invoked
+ * with APR_SUCCESS.
+ */
+apr_status_t serf_connection_close(
+ serf_connection_t *conn);
+
+/**
+ * Sets the maximum number of outstanding requests @a max_requests on the
+ * connection @a conn. Setting max_requests to 0 means unlimited (the default).
+ * Ex.: setting max_requests to 1 means a request is sent only after the
+ * response to the previous request has been received and handled.
+ */
+void serf_connection_set_max_outstanding_requests(
+ serf_connection_t *conn,
+ unsigned int max_requests);
+
+void serf_connection_set_async_responses(
+ serf_connection_t *conn,
+ serf_response_acceptor_t acceptor,
+ void *acceptor_baton,
+ serf_response_handler_t handler,
+ void *handler_baton);
+
+/**
+ * Setup the @a request for delivery on its connection.
+ *
+ * Right before this is invoked, @a pool will be built within the
+ * connection's pool for the request to use. The associated response will
+ * be allocated within that subpool. An associated bucket allocator will
+ * be built. These items may be fetched from the request object through
+ * @see serf_request_get_pool or @see serf_request_get_alloc.
+ *
+ * The content of the request is specified by the @a req_bkt bucket. When
+ * a response arrives, the @a acceptor callback will be invoked (along with
+ * the @a acceptor_baton) to produce a response bucket. That bucket will then
+ * be passed to @a handler, along with the @a handler_baton.
+ *
+ * The responsibility for the request bucket is passed to the request
+ * object. When the request is done with the bucket, it will be destroyed.
+ */
+typedef apr_status_t (*serf_request_setup_t)(
+ serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool);
+
+/**
+ * Construct a request object for the @a conn connection.
+ *
+ * When it is time to deliver the request, the @a setup callback will
+ * be invoked with the @a setup_baton passed into it to complete the
+ * construction of the request object.
+ *
+ * If the request has not (yet) been delivered, then it may be canceled
+ * with @see serf_request_cancel.
+ *
+ * Invoking any calls other than @see serf_request_cancel before the setup
+ * callback executes is not supported.
+ */
+serf_request_t *serf_connection_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton);
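+/* Illustrative sketch only: a serf_request_setup_t callback that builds a
+ * GET request and hooks up the (hypothetical) accept_response and
+ * handle_response callbacks sketched earlier, followed by the call that
+ * queues the request on a connection.
+ *
+ *   static apr_status_t setup_request(serf_request_t *request,
+ *                                     void *setup_baton,
+ *                                     serf_bucket_t **req_bkt,
+ *                                     serf_response_acceptor_t *acceptor,
+ *                                     void **acceptor_baton,
+ *                                     serf_response_handler_t *handler,
+ *                                     void **handler_baton,
+ *                                     apr_pool_t *pool)
+ *   {
+ *       *req_bkt = serf_request_bucket_request_create(
+ *                      request, "GET", "/", NULL,
+ *                      serf_request_get_alloc(request));
+ *       *acceptor = accept_response;
+ *       *acceptor_baton = setup_baton;
+ *       *handler = handle_response;
+ *       *handler_baton = setup_baton;
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   ...
+ *   serf_connection_request_create(conn, setup_request, &app_baton);
+ */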
+
+/**
+ * Construct a request object for the @a conn connection, and insert it
+ * ahead of all other unwritten requests, making it the next to be written.
+ *
+ * When it is time to deliver the request, the @a setup callback will
+ * be invoked with the @a setup_baton passed into it to complete the
+ * construction of the request object.
+ *
+ * If the request has not (yet) been delivered, then it may be canceled
+ * with @see serf_request_cancel.
+ *
+ * Invoking any calls other than @see serf_request_cancel before the setup
+ * callback executes is not supported.
+ */
+serf_request_t *serf_connection_priority_request_create(
+ serf_connection_t *conn,
+ serf_request_setup_t setup,
+ void *setup_baton);
+
+/**
+ * Cancel the request specified by the @a request object.
+ *
+ * If the request has been scheduled for delivery, then its response
+ * handler will be run, passing NULL for the response bucket.
+ *
+ * If the request has already been (partially or fully) delivered, then
+ * APR_EBUSY is returned and the request is *NOT* canceled. To properly
+ * cancel the request, the connection must be closed (by clearing or
+ * destroying its associated pool).
+ */
+apr_status_t serf_request_cancel(
+ serf_request_t *request);
+
+/**
+ * Return the pool associated with @a request.
+ *
+ * WARNING: be very careful about the kinds of things placed into this
+ * pool. In particular, all allocation should be bounded in size, rather
+ * than proportional to any data stream.
+ */
+apr_pool_t *serf_request_get_pool(
+ const serf_request_t *request);
+
+/**
+ * Return the bucket allocator associated with @a request.
+ */
+serf_bucket_alloc_t *serf_request_get_alloc(
+ const serf_request_t *request);
+
+/**
+ * Return the connection associated with @a request.
+ */
+serf_connection_t *serf_request_get_conn(
+ const serf_request_t *request);
+
+/**
+ * Update the @a handler and @a handler_baton for this @a request.
+ *
+ * This can be called after the request has started processing -
+ * subsequent data will be delivered to this new handler.
+ */
+void serf_request_set_handler(
+ serf_request_t *request,
+ const serf_response_handler_t handler,
+ const void **handler_baton);
+
+/**
+ * Configure proxy server settings, to be used by all connections associated
+ * with the @a ctx serf context.
+ *
+ * The next connection will be created to connect to the proxy server
+ * specified by @a address. The address must live at least as long as the
+ * serf context.
+ */
+void serf_config_proxy(
+ serf_context_t *ctx,
+ apr_sockaddr_t *address);
+
+/* Supported authentication types. */
+#define SERF_AUTHN_NONE 0x00
+#define SERF_AUTHN_BASIC 0x01
+#define SERF_AUTHN_DIGEST 0x02
+#define SERF_AUTHN_NTLM 0x04
+#define SERF_AUTHN_NEGOTIATE 0x08
+#define SERF_AUTHN_ALL 0xFF
+
+/**
+ * Define the authentication handlers that serf will try on incoming requests.
+ */
+void serf_config_authn_types(
+ serf_context_t *ctx,
+ int authn_types);
+
+/**
+ * Set the credentials callback handler.
+ */
+void serf_config_credentials_callback(
+ serf_context_t *ctx,
+ serf_credentials_callback_t cred_cb);
+
+/* ### maybe some connection control functions for flood? */
+
+/*** Special bucket creation functions ***/
+
+/**
+ * Create a bucket of type 'socket bucket'.
+ * This is basically a wrapper around @a serf_bucket_socket_create, which
+ * initializes the bucket using connection and/or context specific settings.
+ */
+serf_bucket_t *serf_context_bucket_socket_create(
+ serf_context_t *ctx,
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Create a bucket of type 'request bucket'.
+ * This is basically a wrapper around @a serf_bucket_request_create, which
+ * initializes the bucket using request, connection and/or context specific
+ * settings.
+ *
+ * This function will set the following header(s):
+ * - Host: if the connection was created with @a serf_connection_create2.
+ */
+serf_bucket_t *serf_request_bucket_request_create(
+ serf_request_t *request,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator);
+
+/** @} */
+
+
+/**
+ * @defgroup serf buckets
+ * @ingroup serf
+ * @{
+ */
+
+/** Pass as REQUESTED to the read function of a bucket to read, consume,
+ * and return all available data.
+ */
+#define SERF_READ_ALL_AVAIL ((apr_size_t)-1)
+
+/** Acceptable newline types for bucket->readline(). */
+#define SERF_NEWLINE_CR 0x0001
+#define SERF_NEWLINE_CRLF 0x0002
+#define SERF_NEWLINE_LF 0x0004
+#define SERF_NEWLINE_ANY 0x0007
+
+/** Used to indicate that a newline is not present in the data buffer. */
+/* ### should we make this zero? */
+#define SERF_NEWLINE_NONE 0x0008
+
+/** Used to indicate that a CR was found at the end of a buffer, and CRLF
+ * was acceptable. It may be that the LF is present, but it needs to be
+ * read first.
+ *
+ * Note: an alternative to using this symbol would be for callers to see
+ * the SERF_NEWLINE_CR return value, and know that some "end of buffer" was
+ * reached. While this works well for @see serf_util_readline, it does not
+ * necessarily work as well for buckets (there is no obvious "end of buffer",
+ * although there is an "end of bucket"). The other problem with that
+ * alternative is that developers might miss the condition. This symbol
+ * calls out the possibility and ensures that callers will watch for it.
+ */
+#define SERF_NEWLINE_CRLF_SPLIT 0x0010
+
+
+struct serf_bucket_type_t {
+
+ /** name of this bucket type */
+ const char *name;
+
+ /**
+ * Read (and consume) up to @a requested bytes from @a bucket.
+ *
+ * A pointer to the data will be returned in @a data, and its length
+ * is specified by @a len.
+ *
+ * The data will exist until one of two conditions occur:
+ *
+ * 1) this bucket is destroyed
+ * 2) another call to any read function or to peek()
+ *
+ * If an application needs the data to exist for a longer duration,
+ * then it must make a copy.
+ */
+ apr_status_t (*read)(serf_bucket_t *bucket, apr_size_t requested,
+ const char **data, apr_size_t *len);
+
+ /**
+ * Read (and consume) a line of data from @a bucket.
+ *
+ * The acceptable forms of a newline are given by @a acceptable, and
+ * the type found is returned in @a found. If a newline is not present
+ * in the returned data, then SERF_NEWLINE_NONE is stored into @a found.
+ *
+ * A pointer to the data is returned in @a data, and its length is
+ * specified by @a len. The data will include the newline, if present.
+ *
+ * Note that there is no way to limit the amount of data returned
+ * by this function.
+ *
+ * The lifetime of the data is the same as that of the @see read
+ * function above.
+ */
+ apr_status_t (*readline)(serf_bucket_t *bucket, int acceptable,
+ int *found,
+ const char **data, apr_size_t *len);
+
+ /**
+ * Read a set of pointer/length pairs from the bucket.
+ *
+ * The size of the @a vecs array is specified by @a vecs_size. The
+ * bucket should fill in elements of the array, and return the number
+ * used in @a vecs_used.
+ *
+ * Each element of @a vecs should specify a pointer to a block of
+ * data and a length of that data.
+ *
+ * The total length of all data elements should not exceed the
+ * amount specified in @a requested.
+ *
+ * The lifetime of the data is the same as that of the @see read
+ * function above.
+ */
+ apr_status_t (*read_iovec)(serf_bucket_t *bucket, apr_size_t requested,
+ int vecs_size, struct iovec *vecs,
+ int *vecs_used);
+
+ /**
+ * Read data from the bucket in a form suitable for apr_socket_sendfile()
+ *
+ * On input, hdtr->numheaders and hdtr->numtrailers specify the size
+ * of the hdtr->headers and hdtr->trailers arrays, respectively. The
+ * bucket should fill in the headers and trailers, up to the specified
+ * limits, and set numheaders and numtrailers to the number of iovecs
+ * filled in for each item.
+ *
+ * @a file should be filled in with a file that can be read. If a file
+ * is not available or appropriate, then NULL should be stored. The
+ * file offset for the data should be stored in @a offset, and the
+ * length of that data should be stored in @a len. If a file is not
+ * returned, then @a offset and @a len should be ignored.
+ *
+ * The file position is not required to correspond to @a offset, and
+ * the caller may manipulate it at will.
+ *
+ * The total length of all data elements, and the portion of the
+ * file should not exceed the amount specified in @a requested.
+ *
+ * The lifetime of the data is the same as that of the @see read
+ * function above.
+ */
+ apr_status_t (*read_for_sendfile)(serf_bucket_t *bucket,
+ apr_size_t requested, apr_hdtr_t *hdtr,
+ apr_file_t **file, apr_off_t *offset,
+ apr_size_t *len);
+
+ /**
+ * Look within @a bucket for a bucket of the given @a type. The bucket
+ * must be the "initial" data because it will be consumed by this
+ * function. If the given bucket type is available, then read and consume
+ * it, and return it to the caller.
+ *
+ * This function is usually used by readers that have custom handling
+ * for specific bucket types (e.g. looking for a file bucket to pass
+ * to apr_socket_sendfile).
+ *
+ * If a bucket of the given type is not found, then NULL is returned.
+ *
+ * The returned bucket becomes the responsibility of the caller. When
+ * the caller is done with the bucket, it should be destroyed.
+ */
+ serf_bucket_t * (*read_bucket)(serf_bucket_t *bucket,
+ const serf_bucket_type_t *type);
+
+ /**
+ * Peek, but don't consume, the data in @a bucket.
+ *
+ * Since this function is non-destructive, the implicit read size is
+ * SERF_READ_ALL_AVAIL. The caller can then use whatever amount is
+ * appropriate.
+ *
+ * The @a data parameter will point to the data, and @a len will
+ * specify how much data is available. The lifetime of the data follows
+ * the same rules as the @see read function above.
+ *
+ * Note: if the peek does not return enough data for your particular
+ * use, then you must read/consume some first, then peek again.
+ *
+ * If the returned data represents all available data, then APR_EOF
+ * will be returned. Since this function does not consume data, it
+ * can return the same data repeatedly rather than blocking; thus,
+ * APR_EAGAIN will never be returned.
+ */
+ apr_status_t (*peek)(serf_bucket_t *bucket,
+ const char **data, apr_size_t *len);
+
+ /**
+ * Destroy @a bucket, along with any associated resources.
+ */
+ void (*destroy)(serf_bucket_t *bucket);
+
+ /* ### apr buckets have 'copy', 'split', and 'setaside' functions.
+ ### not sure whether those will be needed in this bucket model.
+ */
+};
+
+/**
+ * Should the use and lifecycle of buckets be tracked?
+ *
+ * When tracking, the system will ensure several semantic requirements
+ * of bucket use:
+ *
+ * - if a bucket returns APR_EAGAIN, its read functions should not be
+ *   called again immediately; the context's run loop should be run instead.
+ * ### and for APR_EOF, too?
+ * - all buckets must be drained of input before returning to the
+ * context's run loop.
+ * - buckets should not be destroyed before they return APR_EOF unless
+ * the connection is closed for some reason.
+ *
+ * Leave this symbol undefined to avoid the tracking (and gain some performance).
+ *
+ * ### we may want to examine when/how we provide this. should it always
+ * ### be compiled in? and apps select it before including this header?
+ */
+/* #define SERF_DEBUG_BUCKET_USE */
+
+
+/* Internal macros for tracking bucket use. */
+#ifdef SERF_DEBUG_BUCKET_USE
+#define SERF__RECREAD(b,s) serf_debug__record_read(b,s)
+#else
+#define SERF__RECREAD(b,s) (s)
+#endif
+
+#define serf_bucket_read(b,r,d,l) SERF__RECREAD(b, (b)->type->read(b,r,d,l))
+#define serf_bucket_readline(b,a,f,d,l) \
+ SERF__RECREAD(b, (b)->type->readline(b,a,f,d,l))
+#define serf_bucket_read_iovec(b,r,s,v,u) \
+ SERF__RECREAD(b, (b)->type->read_iovec(b,r,s,v,u))
+#define serf_bucket_read_for_sendfile(b,r,h,f,o,l) \
+ SERF__RECREAD(b, (b)->type->read_for_sendfile(b,r,h,f,o,l))
+#define serf_bucket_read_bucket(b,t) ((b)->type->read_bucket(b,t))
+#define serf_bucket_peek(b,d,l) ((b)->type->peek(b,d,l))
+#define serf_bucket_destroy(b) ((b)->type->destroy(b))
+
+/**
+ * Check whether a real error occurred. Note that bucket read functions
+ * can return EOF and EAGAIN as part of their "normal" operation, so they
+ * should not be considered an error.
+ */
+#define SERF_BUCKET_READ_ERROR(status) ((status) \
+ && !APR_STATUS_IS_EOF(status) \
+ && !APR_STATUS_IS_EAGAIN(status) \
+ && (SERF_ERROR_WAIT_CONN != status))
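+/* Illustrative sketch only: distinguishing real errors from the "normal"
+ * EOF/EAGAIN statuses when reading a bucket through the macros above.
+ *
+ *   const char *data;
+ *   apr_size_t len;
+ *   apr_status_t status = serf_bucket_read(bkt, SERF_READ_ALL_AVAIL,
+ *                                          &data, &len);
+ *   if (SERF_BUCKET_READ_ERROR(status))
+ *       return status;              (hard failure)
+ *   (otherwise "len" bytes are available at "data"; APR_EOF means the
+ *    bucket is exhausted, APR_EAGAIN means try again later)
+ */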
+
+
+struct serf_bucket_t {
+
+ /** the type of this bucket */
+ const serf_bucket_type_t *type;
+
+ /** bucket-private data */
+ void *data;
+
+ /** the allocator used for this bucket (needed at destroy time) */
+ serf_bucket_alloc_t *allocator;
+};
+
+
+/**
+ * Generic macro to construct "is TYPE" macros.
+ */
+#define SERF_BUCKET_CHECK(b, btype) ((b)->type == &serf_bucket_type_ ## btype)
+
+
+/**
+ * Notification callback for a block that was not returned to the bucket
+ * allocator when its pool was destroyed.
+ *
+ * The block of memory is given by @a block. The baton provided when the
+ * allocator was constructed is passed as @a unfreed_baton.
+ */
+typedef void (*serf_unfreed_func_t)(
+ void *unfreed_baton,
+ void *block);
+
+/**
+ * Create a new allocator for buckets.
+ *
+ * All buckets are associated with a serf bucket allocator. This allocator
+ * will be created within @a pool and will be destroyed when that pool is
+ * cleared or destroyed.
+ *
+ * When the allocator is destroyed, if any allocations were not explicitly
+ * returned (by calling serf_bucket_mem_free), then the @a unfreed callback
+ * will be invoked for each block. @a unfreed_baton will be passed to the
+ * callback.
+ *
+ * If @a unfreed is NULL, then the library will invoke the abort() stdlib
+ * call. Any failure to return memory is a bug in the application, and an
+ * abort can assist with determining what kinds of memory were not freed.
+ */
+serf_bucket_alloc_t *serf_bucket_allocator_create(
+ apr_pool_t *pool,
+ serf_unfreed_func_t unfreed,
+ void *unfreed_baton);
+
+/**
+ * Return the pool that was used for this @a allocator.
+ *
+ * WARNING: the use of this pool for allocations requires a very
+ * detailed understanding of pool behaviors, the bucket system,
+ * and knowledge of the bucket's use within the overall pattern
+ * of request/response behavior.
+ *
+ * See design-guide.txt for more information about pool usage.
+ */
+apr_pool_t *serf_bucket_allocator_get_pool(
+ const serf_bucket_alloc_t *allocator);
+
+
+/**
+ * Utility structure for reading a complete line of input from a bucket.
+ *
+ * Since it is entirely possible for a line to be broken by APR_EAGAIN,
+ * this structure can be used to accumulate the data until a complete line
+ * has been read from a bucket.
+ */
+
+/* This limit applies to the line buffer functions. If an application needs
+ * longer lines, then they will need to manually handle line buffering.
+ */
+#define SERF_LINEBUF_LIMIT 8000
+
+typedef struct {
+
+ /* Current state of the buffer. */
+ enum {
+ SERF_LINEBUF_EMPTY,
+ SERF_LINEBUF_READY,
+ SERF_LINEBUF_PARTIAL,
+ SERF_LINEBUF_CRLF_SPLIT
+ } state;
+
+ /* How much of the buffer have we used? */
+ apr_size_t used;
+
+ /* The line is read into this buffer, minus CR/LF */
+ char line[SERF_LINEBUF_LIMIT];
+
+} serf_linebuf_t;
+
+/**
+ * Initialize the @a linebuf structure.
+ */
+void serf_linebuf_init(serf_linebuf_t *linebuf);
+
+/**
+ * Fetch a line of text from @a bucket, accumulating the line into
+ * @a linebuf. @a acceptable specifies the types of newlines which are
+ * acceptable for this fetch.
+ *
+ * ### we should return a data/len pair so that we can avoid a copy,
+ * ### rather than having callers look into our state and line buffer.
+ */
+apr_status_t serf_linebuf_fetch(
+ serf_linebuf_t *linebuf,
+ serf_bucket_t *bucket,
+ int acceptable);
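+/* Illustrative sketch only: accumulating lines from a bucket with the line
+ * buffer utilities. What is done with each complete line is
+ * application-specific.
+ *
+ *   serf_linebuf_t lb;
+ *   apr_status_t status;
+ *
+ *   serf_linebuf_init(&lb);
+ *   do {
+ *       status = serf_linebuf_fetch(&lb, bkt, SERF_NEWLINE_CRLF);
+ *       if (SERF_BUCKET_READ_ERROR(status))
+ *           return status;
+ *       if (lb.state == SERF_LINEBUF_READY) {
+ *           (lb.line holds lb.used bytes, without the CR/LF)
+ *       }
+ *   } while (status == APR_SUCCESS);
+ */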
+
+/** @} */
+
+
+/* Internal functions for bucket use and lifecycle tracking */
+apr_status_t serf_debug__record_read(
+ const serf_bucket_t *bucket,
+ apr_status_t status);
+void serf_debug__entered_loop(
+ serf_bucket_alloc_t *allocator);
+void serf_debug__closed_conn(
+ serf_bucket_alloc_t *allocator);
+void serf_debug__bucket_destroy(
+ const serf_bucket_t *bucket);
+void serf_debug__bucket_alloc_check(
+ serf_bucket_alloc_t *allocator);
+
+/* Version info */
+#define SERF_MAJOR_VERSION 1
+#define SERF_MINOR_VERSION 1
+#define SERF_PATCH_VERSION 0
+
+/* Version number string */
+#define SERF_VERSION_STRING APR_STRINGIFY(SERF_MAJOR_VERSION) "." \
+ APR_STRINGIFY(SERF_MINOR_VERSION) "." \
+ APR_STRINGIFY(SERF_PATCH_VERSION)
+
+/**
+ * Check at compile time if the Serf version is at least a certain
+ * level.
+ * @param major The major version component of the version checked
+ * for (e.g., the "1" of "1.3.0").
+ * @param minor The minor version component of the version checked
+ * for (e.g., the "3" of "1.3.0").
+ * @param patch The patch level component of the version checked
+ * for (e.g., the "0" of "1.3.0").
+ */
+#define SERF_VERSION_AT_LEAST(major,minor,patch) \
+(((major) < SERF_MAJOR_VERSION) \
+ || ((major) == SERF_MAJOR_VERSION && (minor) < SERF_MINOR_VERSION) \
+ || ((major) == SERF_MAJOR_VERSION && (minor) == SERF_MINOR_VERSION && \
+ (patch) <= SERF_PATCH_VERSION))
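+/* Illustrative sketch only: compile-time feature selection based on the
+ * serf version.
+ *
+ *   #if SERF_VERSION_AT_LEAST(1, 1, 0)
+ *       (code that relies on 1.1.x features)
+ *   #endif
+ */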
+
+
+/**
+ * Returns the version of the library the application has linked/loaded.
+ * Values are returned in @a major, @a minor, and @a patch.
+ *
+ * Applications will want to use this function to verify compatibility,
+ * especially while serf has not reached a 1.0 milestone. APIs and
+ * semantics may change drastically until the library hits 1.0.
+ */
+void serf_lib_version(
+ int *major,
+ int *minor,
+ int *patch);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/*
+ * Every user of serf will want to deal with our various bucket types.
+ * Go ahead and include that header right now.
+ *
+ * Note: make sure this occurs outside of the C++ namespace block
+ */
+#include "serf_bucket_types.h"
+
+
+#endif /* !SERF_H */
diff --git a/serf.mak b/serf.mak
new file mode 100644
index 0000000..4dd6057
--- /dev/null
+++ b/serf.mak
@@ -0,0 +1,213 @@
+#**** serf Win32 -*- Makefile -*- ********************************************
+#
+# Define DEBUG_BUILD to create a debug version of the library.
+
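+# Example invocation (illustrative only; the paths are placeholders for
+# wherever the dependencies were built):
+#
+#   nmake -f serf.mak APR_SRC=..\apr APRUTIL_SRC=..\apr-util ^
+#         OPENSSL_SRC=..\openssl ZLIB_SRC=..\zlib DEBUG_BUILD=1
+#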
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+
+CFLAGS = /Zi /W3 /EHsc /I "./"
+
+!IF "$(DEBUG_BUILD)" == ""
+INTDIR = Release
+CFLAGS = /MD /O2 /D "NDEBUG" $(CFLAGS)
+STATIC_LIB = $(INTDIR)\serf-1.lib
+!ELSE
+INTDIR = Debug
+CFLAGS = /MDd /Od /W3 /Gm /D "_DEBUG" $(CFLAGS)
+STATIC_LIB = $(INTDIR)\serf-1.lib
+!ENDIF
+
+########
+# Support for OpenSSL integration
+!IF "$(OPENSSL_SRC)" == ""
+!ERROR OpenSSL is required. Please define OPENSSL_SRC.
+!ELSE
+OPENSSL_FLAGS = /I "$(OPENSSL_SRC)\inc32"
+!ENDIF
+
+!IF "$(HTTPD_SRC)" != ""
+!IF "$(APR_SRC)" == ""
+APR_SRC=$(HTTPD_SRC)\srclib\apr
+!ENDIF
+
+!IF "$(APRUTIL_SRC)" == ""
+APRUTIL_SRC=$(HTTPD_SRC)\srclib\apr-util
+!ENDIF
+
+!ENDIF
+
+########
+# APR
+!IF "$(APR_SRC)" == ""
+!ERROR APR is required. Please define APR_SRC or HTTPD_SRC.
+!ENDIF
+
+APR_FLAGS = /I "$(APR_SRC)\include"
+!IF [IF EXIST "$(APR_SRC)\$(INTDIR)\libapr-1.lib" exit 1] == 1
+APR_LIBS = "$(APR_SRC)\$(INTDIR)\libapr-1.lib"
+!ELSE
+APR_LIBS = "$(APR_SRC)\$(INTDIR)\libapr.lib"
+!ENDIF
+
+########
+# APR Util
+!IF "$(APRUTIL_SRC)" == ""
+!ERROR APR-Util is required. Please define APRUTIL_SRC or HTTPD_SRC.
+!ENDIF
+
+APRUTIL_FLAGS = /I "$(APRUTIL_SRC)\include"
+!IF [IF EXIST "$(APRUTIL_SRC)\$(INTDIR)\libaprutil-1.lib" exit 1] == 1
+APRUTIL_LIBS = "$(APRUTIL_SRC)\$(INTDIR)\libaprutil-1.lib"
+!ELSE
+APRUTIL_LIBS = "$(APRUTIL_SRC)\$(INTDIR)\libaprutil.lib"
+!ENDIF
+
+########
+# Support for zlib integration
+!IF "$(ZLIB_SRC)" == ""
+!ERROR ZLib is required. Please define ZLIB_SRC.
+!ELSE
+ZLIB_FLAGS = /I "$(ZLIB_SRC)"
+!IF "$(ZLIB_DLL)" == ""
+!IF "$(ZLIB_LIBDIR)" == ""
+!IF "$(DEBUG_BUILD)" == ""
+ZLIB_LIBS = "$(ZLIB_SRC)\zlibstat.lib"
+!ELSE
+ZLIB_LIBS = "$(ZLIB_SRC)\zlibstatD.lib"
+!ENDIF
+!ELSE
+ZLIB_LIBS = "$(ZLIB_LIBDIR)\x86\ZlibStat$(INTDIR)\zlibstat.lib"
+ZLIB_FLAGS = $(ZLIB_FLAGS) /D ZLIB_WINAPI
+!ENDIF
+!ELSE
+ZLIB_FLAGS = $(ZLIB_FLAGS) /D ZLIB_DLL
+ZLIB_LIBS = "$(ZLIB_SRC)\zlibdll.lib"
+!ENDIF
+!ENDIF
+
+
+# Exclude stuff we don't need from the Win32 headers
+WIN32_DEFS = /D WIN32 /D WIN32_LEAN_AND_MEAN /D NOUSER /D NOGDI /D NONLS /D NOCRYPT /D SERF_HAVE_SSPI
+
+CPP=cl.exe
+CPP_PROJ = /c /nologo $(CFLAGS) $(WIN32_DEFS) $(APR_FLAGS) $(APRUTIL_FLAGS) $(OPENSSL_FLAGS) $(ZLIB_FLAGS) /Fo"$(INTDIR)\\" /Fd"$(INTDIR)\\"
+LIB32=link.exe
+LIB32_FLAGS=/nologo
+
+LIB32_OBJS= \
+ "$(INTDIR)\aggregate_buckets.obj" \
+ "$(INTDIR)\auth.obj" \
+ "$(INTDIR)\auth_basic.obj" \
+ "$(INTDIR)\auth_digest.obj" \
+ "$(INTDIR)\auth_kerb.obj" \
+ "$(INTDIR)\auth_kerb_gss.obj" \
+ "$(INTDIR)\auth_kerb_sspi.obj" \
+ "$(INTDIR)\context.obj" \
+ "$(INTDIR)\ssltunnel.obj" \
+ "$(INTDIR)\allocator.obj" \
+ "$(INTDIR)\barrier_buckets.obj" \
+ "$(INTDIR)\buckets.obj" \
+ "$(INTDIR)\chunk_buckets.obj" \
+ "$(INTDIR)\dechunk_buckets.obj" \
+ "$(INTDIR)\deflate_buckets.obj" \
+ "$(INTDIR)\file_buckets.obj" \
+ "$(INTDIR)\headers_buckets.obj" \
+ "$(INTDIR)\incoming.obj" \
+ "$(INTDIR)\iovec_buckets.obj" \
+ "$(INTDIR)\limit_buckets.obj" \
+ "$(INTDIR)\mmap_buckets.obj" \
+ "$(INTDIR)\outgoing.obj" \
+ "$(INTDIR)\request_buckets.obj" \
+ "$(INTDIR)\response_buckets.obj" \
+ "$(INTDIR)\simple_buckets.obj" \
+ "$(INTDIR)\socket_buckets.obj" \
+ "$(INTDIR)\ssl_buckets.obj" \
+
+!IFDEF OPENSSL_STATIC
+LIB32_OBJS = $(LIB32_OBJS) "$(OPENSSL_SRC)\out32\libeay32.lib" \
+ "$(OPENSSL_SRC)\out32\ssleay32.lib"
+!ELSE
+LIB32_OBJS = $(LIB32_OBJS) "$(OPENSSL_SRC)\out32dll\libeay32.lib" \
+ "$(OPENSSL_SRC)\out32dll\ssleay32.lib"
+!ENDIF
+
+LIB32_OBJS = $(LIB32_OBJS) $(APR_LIBS) $(APRUTIL_LIBS) $(ZLIB_LIBS)
+
+SYS_LIBS = secur32.lib
+
+TEST_OBJS = \
+ "$(INTDIR)\CuTest.obj" \
+ "$(INTDIR)\test_all.obj" \
+ "$(INTDIR)\test_util.obj" \
+ "$(INTDIR)\test_context.obj" \
+ "$(INTDIR)\test_buckets.obj" \
+ "$(INTDIR)\test_ssl.obj" \
+ "$(INTDIR)\test_server.obj" \
+
+TEST_LIBS = user32.lib advapi32.lib gdi32.lib ws2_32.lib
+
+
+ALL: INTDIR $(STATIC_LIB) TESTS
+
+CLEAN:
+ -@erase /q "$(INTDIR)" >nul
+
+INTDIR:
+ -@if not exist "$(INTDIR)/$(NULL)" mkdir "$(INTDIR)"
+
+TESTS: $(STATIC_LIB) $(INTDIR)\serf_response.exe $(INTDIR)\serf_get.exe \
+ $(INTDIR)\serf_request.exe $(INTDIR)\test_all.exe
+
+CHECK: INTDIR TESTS
+ $(INTDIR)\serf_response.exe test\testcases\simple.response
+ $(INTDIR)\serf_response.exe test\testcases\chunked-empty.response
+ $(INTDIR)\serf_response.exe test\testcases\chunked.response
+ $(INTDIR)\serf_response.exe test\testcases\chunked-trailers.response
+ $(INTDIR)\serf_response.exe test\testcases\deflate.response
+ $(INTDIR)\test_all.exe
+
+"$(STATIC_LIB)": INTDIR $(LIB32_OBJS)
+ $(LIB32) -lib @<<
+ $(LIB32_FLAGS) $(LIB32_OBJS) $(SYS_LIBS) /OUT:$@
+<<
+
+
+.c{$(INTDIR)}.obj:
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+{auth}.c{$(INTDIR)}.obj:
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+{buckets}.c{$(INTDIR)}.obj:
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+{test}.c{$(INTDIR)}.obj:
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+{test\server}.c{$(INTDIR)}.obj:
+ $(CPP) @<<
+ $(CPP_PROJ) $<
+<<
+
+$(INTDIR)\serf_response.exe: $(INTDIR)\serf_response.obj $(STATIC_LIB)
+ $(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)
+
+$(INTDIR)\serf_get.exe: $(INTDIR)\serf_get.obj $(STATIC_LIB)
+ $(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)
+
+$(INTDIR)\serf_request.exe: $(INTDIR)\serf_request.obj $(STATIC_LIB)
+ $(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)
+
+$(INTDIR)\test_all.exe: $(TEST_OBJS) $(STATIC_LIB)
+ $(LIB32) /DEBUG /OUT:$@ $** $(LIB32_FLAGS) $(TEST_LIBS)
diff --git a/serf.pc.in b/serf.pc.in
new file mode 100644
index 0000000..86df221
--- /dev/null
+++ b/serf.pc.in
@@ -0,0 +1,13 @@
+SERF_MAJOR_VERSION=@SERF_MAJOR_VERSION@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: serf
+Description: HTTP client library
+Version: @SERF_DOTTED_VERSION@
+Requires.private: libssl libcrypto
+Libs: -L${libdir} -lserf-${SERF_MAJOR_VERSION}
+Libs.private: @EXTRA_LIBS@ @SERF_LIBS@ -lz
+Cflags: -I${includedir}
diff --git a/serf_bucket_types.h b/serf_bucket_types.h
new file mode 100644
index 0000000..428600a
--- /dev/null
+++ b/serf_bucket_types.h
@@ -0,0 +1,656 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SERF_BUCKET_TYPES_H
+#define SERF_BUCKET_TYPES_H
+
+#include <apr_mmap.h>
+#include <apr_hash.h>
+
+/* this header and serf.h refer to each other, so take a little extra care */
+#ifndef SERF_H
+#include "serf.h"
+#endif
+
+
+/**
+ * @file serf_bucket_types.h
+ * @brief serf-supported bucket types
+ */
+/* ### this whole file needs docco ... */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_request;
+#define SERF_BUCKET_IS_REQUEST(b) SERF_BUCKET_CHECK((b), request)
+
+serf_bucket_t *serf_bucket_request_create(
+ const char *method,
+ const char *URI,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator);
+
+/* Send a Content-Length header with @a len. The @a body bucket should
+ contain precisely that much data. */
+void serf_bucket_request_set_CL(
+ serf_bucket_t *bucket,
+ apr_int64_t len);
+
+serf_bucket_t *serf_bucket_request_get_headers(
+ serf_bucket_t *request);
+
+void serf_bucket_request_become(
+ serf_bucket_t *bucket,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *body);
+
+/**
+ * Sets the root url of the remote host. If this request contains a relative
+ * url, it will be prefixed with the root url to form an absolute url.
+ * @a bucket is the request bucket. @a root_url is the absolute url of the
+ * root of the remote host, without the closing '/'.
+ */
+void serf_bucket_request_set_root(
+ serf_bucket_t *bucket,
+ const char *root_url);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_response;
+#define SERF_BUCKET_IS_RESPONSE(b) SERF_BUCKET_CHECK((b), response)
+
+serf_bucket_t *serf_bucket_response_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+#define SERF_HTTP_VERSION(major, minor) ((major) * 1000 + (minor))
+#define SERF_HTTP_11 SERF_HTTP_VERSION(1, 1)
+#define SERF_HTTP_10 SERF_HTTP_VERSION(1, 0)
+
+typedef struct {
+ int version;
+ int code;
+ const char *reason;
+} serf_status_line;
+
+/**
+ * Return the Status-Line information, if available. This function
+ * works like other bucket read functions: it may return APR_EAGAIN or
+ * APR_EOF to signal the state of the bucket for reading. A return
+ * value of APR_SUCCESS will always indicate that status line
+ * information was returned; for other return values the caller must
+ * check the version field in @a sline. A value of 0 means that the
+ * data is not (yet) present.
+ */
+apr_status_t serf_bucket_response_status(
+ serf_bucket_t *bkt,
+ serf_status_line *sline);
+
+/**
+ * Wait for the HTTP headers to be processed for a @a response.
+ *
+ * If the headers are available, APR_SUCCESS is returned.
+ * If the headers aren't available, APR_EAGAIN is returned.
+ */
+apr_status_t serf_bucket_response_wait_for_headers(
+ serf_bucket_t *response);
+
+/**
+ * Get the headers bucket for @a response.
+ */
+serf_bucket_t *serf_bucket_response_get_headers(
+ serf_bucket_t *response);
+
+/**
+ * Advise the response @a bucket that this was from a HEAD request and
+ * that it should not expect to see a response body.
+ */
+void serf_bucket_response_set_head(
+ serf_bucket_t *bucket);
+
+/* ==================================================================== */
+
+extern const serf_bucket_type_t serf_bucket_type_bwtp_frame;
+#define SERF_BUCKET_IS_BWTP_FRAME(b) SERF_BUCKET_CHECK((b), bwtp_frame)
+
+extern const serf_bucket_type_t serf_bucket_type_bwtp_incoming_frame;
+#define SERF_BUCKET_IS_BWTP_INCOMING_FRAME(b) SERF_BUCKET_CHECK((b), bwtp_incoming_frame)
+
+int serf_bucket_bwtp_frame_get_channel(
+ serf_bucket_t *hdr);
+
+int serf_bucket_bwtp_frame_get_type(
+ serf_bucket_t *hdr);
+
+const char *serf_bucket_bwtp_frame_get_phrase(
+ serf_bucket_t *hdr);
+
+serf_bucket_t *serf_bucket_bwtp_frame_get_headers(
+ serf_bucket_t *hdr);
+
+serf_bucket_t *serf_bucket_bwtp_channel_open(
+ int channel,
+ const char *URI,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_channel_close(
+ int channel,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_header_create(
+ int channel,
+ const char *phrase,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_message_create(
+ int channel,
+ serf_bucket_t *body,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_bwtp_incoming_frame_create(
+ serf_bucket_t *bkt,
+ serf_bucket_alloc_t *allocator);
+
+apr_status_t serf_bucket_bwtp_incoming_frame_wait_for_headers(
+ serf_bucket_t *bkt);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_aggregate;
+#define SERF_BUCKET_IS_AGGREGATE(b) SERF_BUCKET_CHECK((b), aggregate)
+
+typedef apr_status_t (*serf_bucket_aggregate_eof_t)(
+ void *baton,
+ serf_bucket_t *aggregate_bucket);
+
+/** serf_bucket_aggregate_cleanup will instantly destroy all buckets in
+ the aggregate bucket that have been read completely, whereas normally
+ such buckets are only destroyed during read operations. */
+void serf_bucket_aggregate_cleanup(
+ serf_bucket_t *bucket,
+ serf_bucket_alloc_t *allocator);
+
+serf_bucket_t *serf_bucket_aggregate_create(
+ serf_bucket_alloc_t *allocator);
+
+/* Creates a stream bucket.
+ A stream bucket is like an aggregate bucket, but:
+ - it doesn't destroy its child buckets on cleanup
+ - one can always keep adding child buckets; the handler fn should return
+ APR_EOF when no more buckets will be added.
+
+ Note: keep this factory function internal for now. If it turns out this
+ bucket type is useful outside serf, we should make it an actual separate
+ type.
+ */
+serf_bucket_t *serf__bucket_stream_create(
+ serf_bucket_alloc_t *allocator,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton);
+
+/** Transform @a bucket in-place into an aggregate bucket. */
+void serf_bucket_aggregate_become(
+ serf_bucket_t *bucket);
+
+void serf_bucket_aggregate_prepend(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *prepend_bucket);
+
+void serf_bucket_aggregate_append(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_t *append_bucket);
+
+void serf_bucket_aggregate_hold_open(
+ serf_bucket_t *aggregate_bucket,
+ serf_bucket_aggregate_eof_t fn,
+ void *baton);
+
+void serf_bucket_aggregate_prepend_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count);
+
+void serf_bucket_aggregate_append_iovec(
+ serf_bucket_t *aggregate_bucket,
+ struct iovec *vecs,
+ int vecs_count);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_file;
+#define SERF_BUCKET_IS_FILE(b) SERF_BUCKET_CHECK((b), file)
+
+serf_bucket_t *serf_bucket_file_create(
+ apr_file_t *file,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_socket;
+#define SERF_BUCKET_IS_SOCKET(b) SERF_BUCKET_CHECK((b), socket)
+
+serf_bucket_t *serf_bucket_socket_create(
+ apr_socket_t *skt,
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Call @a progress_func every time bytes are read from the socket, pass
+ * the number of bytes read.
+ *
+ * When using serf's bytes read & written progress indicator, pass
+ * @a serf_context_progress_delta for progress_func and the serf_context for
+ * progress_baton.
+ */
+void serf_bucket_socket_set_read_progress_cb(
+ serf_bucket_t *bucket,
+ const serf_progress_t progress_func,
+ void *progress_baton);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_simple;
+#define SERF_BUCKET_IS_SIMPLE(b) SERF_BUCKET_CHECK((b), simple)
+
+typedef void (*serf_simple_freefunc_t)(
+ void *baton,
+ const char *data);
+
+serf_bucket_t *serf_bucket_simple_create(
+ const char *data,
+ apr_size_t len,
+ serf_simple_freefunc_t freefunc,
+ void *freefunc_baton,
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Equivalent to serf_bucket_simple_create, except that the bucket takes
+ * ownership of a private copy of the data.
+ */
+serf_bucket_t *serf_bucket_simple_copy_create(
+ const char *data,
+ apr_size_t len,
+ serf_bucket_alloc_t *allocator);
+
+#define SERF_BUCKET_SIMPLE_STRING(s,a) \
+ serf_bucket_simple_create(s, strlen(s), NULL, NULL, a);
+
+#define SERF_BUCKET_SIMPLE_STRING_LEN(s,l,a) \
+ serf_bucket_simple_create(s, l, NULL, NULL, a);
+
+/* ==================================================================== */
+
+
+/* Note: apr_mmap_t is always defined, but if APR doesn't have mmaps, then
+ the caller can never create an apr_mmap_t to pass to this function. */
+
+extern const serf_bucket_type_t serf_bucket_type_mmap;
+#define SERF_BUCKET_IS_MMAP(b) SERF_BUCKET_CHECK((b), mmap)
+
+serf_bucket_t *serf_bucket_mmap_create(
+ apr_mmap_t *mmap,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_headers;
+#define SERF_BUCKET_IS_HEADERS(b) SERF_BUCKET_CHECK((b), headers)
+
+serf_bucket_t *serf_bucket_headers_create(
+ serf_bucket_alloc_t *allocator);
+
+/**
+ * Set, default: value copied.
+ *
+ * Set the specified @a header within the bucket, copying the @a value
+ * into space from this bucket's allocator. The header is NOT copied,
+ * so it should remain in scope at least as long as the bucket.
+ */
+void serf_bucket_headers_set(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value);
+
+/**
+ * Set, copies: header and value copied.
+ *
+ * Copy the specified @a header and @a value into the bucket, using space
+ * from this bucket's allocator.
+ */
+void serf_bucket_headers_setc(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value);
+
+/**
+ * Set, no copies.
+ *
+ * Set the specified @a header and @a value into the bucket, without
+ * copying either attribute. Both attributes should remain in scope at
+ * least as long as the bucket.
+ *
+ * @note In the case where a header already exists this will result
+ * in a reallocation and copy, @see serf_bucket_headers_setn.
+ */
+void serf_bucket_headers_setn(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ const char *value);
+
+/**
+ * Set, extended: fine grained copy control of header and value.
+ *
+ * Set the specified @a header, with length @a header_size with the
+ * @a value, and length @a value_size, into the bucket. The header will
+ * be copied if @a header_copy is set, and the value is copied if
+ * @a value_copy is set. If the values are not copied, then they should
+ * remain in scope at least as long as the bucket.
+ *
+ * If @a headers_bucket already contains a header with the same name
+ * as @a header, then append @a value to the existing value,
+ * separating with a comma (as per RFC 2616, section 4.2). In this
+ * case, the new value must be allocated and the header re-used, so
+ * behave as if @a value_copy were true and @a header_copy false.
+ */
+void serf_bucket_headers_setx(
+ serf_bucket_t *headers_bucket,
+ const char *header,
+ apr_size_t header_size,
+ int header_copy,
+ const char *value,
+ apr_size_t value_size,
+ int value_copy);
+
+const char *serf_bucket_headers_get(
+ serf_bucket_t *headers_bucket,
+ const char *header);
+
+/**
+ * @param baton opaque baton as passed to @see serf_bucket_headers_do
+ * @param key The header key from this iteration through the table
+ * @param value The header value from this iteration through the table
+ */
+typedef int (serf_bucket_headers_do_callback_fn_t)(
+ void *baton,
+ const char *key,
+ const char *value);
+
+/**
+ * Iterates over all headers of the message and invokes the callback
+ * function with the header key and value. Iteration stops when no more
+ * headers are available or when the callback function returns a
+ * non-0 value.
+ *
+ * @param headers_bucket headers to iterate over
+ * @param func callback routine to invoke for every header in the bucket
+ * @param baton baton to pass on each invocation to func
+ */
+void serf_bucket_headers_do(
+ serf_bucket_t *headers_bucket,
+ serf_bucket_headers_do_callback_fn_t func,
+ void *baton);
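+/* Illustrative sketch only: dumping all headers with serf_bucket_headers_do.
+ * Returning 0 from the callback continues the iteration.
+ *
+ *   static int print_header(void *baton, const char *key, const char *value)
+ *   {
+ *       printf("%s: %s\n", key, value);
+ *       return 0;
+ *   }
+ *
+ *   ...
+ *   serf_bucket_headers_do(hdrs, print_header, NULL);
+ */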
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_chunk;
+#define SERF_BUCKET_IS_CHUNK(b) SERF_BUCKET_CHECK((b), chunk)
+
+serf_bucket_t *serf_bucket_chunk_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_dechunk;
+#define SERF_BUCKET_IS_DECHUNK(b) SERF_BUCKET_CHECK((b), dechunk)
+
+serf_bucket_t *serf_bucket_dechunk_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_deflate;
+#define SERF_BUCKET_IS_DEFLATE(b) SERF_BUCKET_CHECK((b), deflate)
+
+#define SERF_DEFLATE_GZIP 0
+#define SERF_DEFLATE_DEFLATE 1
+
+serf_bucket_t *serf_bucket_deflate_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator,
+ int format);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_limit;
+#define SERF_BUCKET_IS_LIMIT(b) SERF_BUCKET_CHECK((b), limit)
+
+serf_bucket_t *serf_bucket_limit_create(
+ serf_bucket_t *stream,
+ apr_uint64_t limit,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+#define SERF_SSL_CERT_NOTYETVALID 1
+#define SERF_SSL_CERT_EXPIRED 2
+#define SERF_SSL_CERT_UNKNOWNCA 4
+#define SERF_SSL_CERT_SELF_SIGNED 8
+#define SERF_SSL_CERT_UNKNOWN_FAILURE 16
+
+extern const serf_bucket_type_t serf_bucket_type_ssl_encrypt;
+#define SERF_BUCKET_IS_SSL_ENCRYPT(b) SERF_BUCKET_CHECK((b), ssl_encrypt)
+
+typedef struct serf_ssl_context_t serf_ssl_context_t;
+typedef struct serf_ssl_certificate_t serf_ssl_certificate_t;
+
+typedef apr_status_t (*serf_ssl_need_client_cert_t)(
+ void *data,
+ const char **cert_path);
+
+typedef apr_status_t (*serf_ssl_need_cert_password_t)(
+ void *data,
+ const char *cert_path,
+ const char **password);
+
+typedef apr_status_t (*serf_ssl_need_server_cert_t)(
+ void *data,
+ int failures,
+ const serf_ssl_certificate_t *cert);
+
+typedef apr_status_t (*serf_ssl_server_cert_chain_cb_t)(
+ void *data,
+ int failures,
+ int error_depth,
+ const serf_ssl_certificate_t * const * certs,
+ apr_size_t certs_len);
+
+void serf_ssl_client_cert_provider_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_client_cert_t callback,
+ void *data,
+ void *cache_pool);
+
+void serf_ssl_client_cert_password_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_cert_password_t callback,
+ void *data,
+ void *cache_pool);
+
+/**
+ * Set a callback to override the default SSL server certificate validation
+ * algorithm.
+ */
+void serf_ssl_server_cert_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t callback,
+ void *data);
+
+/**
+ * Set callbacks to override the default SSL server certificate validation
+ * algorithm for the current certificate or the entire certificate chain.
+ */
+void serf_ssl_server_cert_chain_callback_set(
+ serf_ssl_context_t *context,
+ serf_ssl_need_server_cert_t cert_callback,
+ serf_ssl_server_cert_chain_cb_t cert_chain_callback,
+ void *data);
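+/* Illustrative sketch only: a permissive server certificate callback that
+ * accepts self-signed certificates but rejects all other failures. A real
+ * application would normally prompt the user or consult a trust store.
+ *
+ *   static apr_status_t ignore_self_signed(void *data, int failures,
+ *                                          const serf_ssl_certificate_t *cert)
+ *   {
+ *       if (failures & ~SERF_SSL_CERT_SELF_SIGNED)
+ *           return APR_EGENERAL;    (any other failure is fatal)
+ *       return APR_SUCCESS;
+ *   }
+ *
+ *   ...
+ *   serf_ssl_server_cert_callback_set(ssl_ctx, ignore_self_signed, NULL);
+ */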
+
+/**
+ * Use the default root CA certificates as included with the OpenSSL library.
+ */
+apr_status_t serf_ssl_use_default_certificates(
+ serf_ssl_context_t *context);
+
+/**
+ * Allow SNI indicators to be sent to the server.
+ */
+apr_status_t serf_ssl_set_hostname(
+ serf_ssl_context_t *context, const char *hostname);
+
+/**
+ * Return the depth of the certificate.
+ */
+int serf_ssl_cert_depth(
+ const serf_ssl_certificate_t *cert);
+
+/**
+ * Extract the fields of the issuer in a table with keys (E, CN, OU, O, L,
+ * ST and C). The returned table will be allocated in @a pool.
+ */
+apr_hash_t *serf_ssl_cert_issuer(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Extract the fields of the subject in a table with keys (E, CN, OU, O, L,
+ * ST and C). The returned table will be allocated in @a pool.
+ */
+apr_hash_t *serf_ssl_cert_subject(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Extract the fields of the certificate in a table with keys (sha1, notBefore,
+ * notAfter). The returned table will be allocated in @a pool.
+ */
+apr_hash_t *serf_ssl_cert_certificate(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Export a certificate as a base64-encoded, zero-terminated string.
+ * The returned string is allocated in @a pool. Returns NULL on failure.
+ */
+const char *serf_ssl_cert_export(
+ const serf_ssl_certificate_t *cert,
+ apr_pool_t *pool);
+
+/**
+ * Load a CA certificate file from a path @a file_path. If the file was loaded
+ * and parsed correctly, a certificate @a cert will be created and returned.
+ * This certificate object will be allocated in @a pool.
+ */
+apr_status_t serf_ssl_load_cert_file(
+ serf_ssl_certificate_t **cert,
+ const char *file_path,
+ apr_pool_t *pool);
+
+/**
+ * Adds the certificate @a cert to the list of trusted certificates in
+ * @a ssl_ctx that will be used for verification.
+ * See also @a serf_ssl_load_cert_file.
+ */
+apr_status_t serf_ssl_trust_cert(
+ serf_ssl_context_t *ssl_ctx,
+ serf_ssl_certificate_t *cert);
+
+serf_bucket_t *serf_bucket_ssl_encrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_context,
+ serf_bucket_alloc_t *allocator);
+
+serf_ssl_context_t *serf_bucket_ssl_encrypt_context_get(
+ serf_bucket_t *bucket);
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_ssl_decrypt;
+#define SERF_BUCKET_IS_SSL_DECRYPT(b) SERF_BUCKET_CHECK((b), ssl_decrypt)
+
+serf_bucket_t *serf_bucket_ssl_decrypt_create(
+ serf_bucket_t *stream,
+ serf_ssl_context_t *ssl_context,
+ serf_bucket_alloc_t *allocator);
+
+serf_ssl_context_t *serf_bucket_ssl_decrypt_context_get(
+ serf_bucket_t *bucket);
+
+
+/* ==================================================================== */
+
+
+extern const serf_bucket_type_t serf_bucket_type_barrier;
+#define SERF_BUCKET_IS_BARRIER(b) SERF_BUCKET_CHECK((b), barrier)
+
+serf_bucket_t *serf_bucket_barrier_create(
+ serf_bucket_t *stream,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+extern const serf_bucket_type_t serf_bucket_type_iovec;
+#define SERF_BUCKET_IS_IOVEC(b) SERF_BUCKET_CHECK((b), iovec)
+
+serf_bucket_t *serf_bucket_iovec_create(
+ struct iovec vecs[],
+ int len,
+ serf_bucket_alloc_t *allocator);
+
+
+/* ==================================================================== */
+
+/* ### do we need a PIPE bucket type? they are simple apr_file_t objects */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !SERF_BUCKET_TYPES_H */
diff --git a/serf_bucket_util.h b/serf_bucket_util.h
new file mode 100644
index 0000000..6ff3035
--- /dev/null
+++ b/serf_bucket_util.h
@@ -0,0 +1,286 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SERF_BUCKET_UTIL_H
+#define SERF_BUCKET_UTIL_H
+
+/**
+ * @file serf_bucket_util.h
+ * @brief This header defines a set of functions and other utilities
+ * for implementing buckets. It is not needed by users of the bucket
+ * system.
+ */
+
+#include "serf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Basic bucket creation function.
+ *
+ * This function will create a bucket of @a type, allocating the necessary
+ * memory from @a allocator. The @a data bucket-private information will
+ * be stored into the bucket.
+ */
+serf_bucket_t *serf_bucket_create(
+ const serf_bucket_type_t *type,
+ serf_bucket_alloc_t *allocator,
+ void *data);
+
+/**
+ * Default implementation of the @see read_iovec functionality.
+ *
+ * This function will use the @see read function to get a block of memory,
+ * then return it in the iovec.
+ */
+apr_status_t serf_default_read_iovec(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ int vecs_size,
+ struct iovec *vecs,
+ int *vecs_used);
+
+/**
+ * Default implementation of the @see read_for_sendfile functionality.
+ *
+ * This function will use the @see read function to get a block of memory,
+ * then return it as a header. No file will be returned.
+ */
+apr_status_t serf_default_read_for_sendfile(
+ serf_bucket_t *bucket,
+ apr_size_t requested,
+ apr_hdtr_t *hdtr,
+ apr_file_t **file,
+ apr_off_t *offset,
+ apr_size_t *len);
+
+/**
+ * Default implementation of the @see read_bucket functionality.
+ *
+ * This function will always return NULL, indicating that the @a type
+ * of bucket cannot be found within @a bucket.
+ */
+serf_bucket_t *serf_default_read_bucket(
+ serf_bucket_t *bucket,
+ const serf_bucket_type_t *type);
+
+/**
+ * Default implementation of the @see destroy functionality.
+ *
+ * This function will return the @a bucket to its allocator.
+ */
+void serf_default_destroy(
+ serf_bucket_t *bucket);
+
+
+/**
+ * Default implementation of the @see destroy functionality.
+ *
+ * This function will return both the @a bucket and its data member to
+ * the allocator.
+ */
+void serf_default_destroy_and_data(
+ serf_bucket_t *bucket);
+
+
+/**
+ * Allocate @a size bytes of memory using @a allocator.
+ *
+ * Returns NULL if the requested amount of memory could not be allocated.
+ */
+void *serf_bucket_mem_alloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size);
+
+/**
+ * Allocate @a size bytes of memory using @a allocator and set all of the
+ * memory to 0.
+ *
+ * Returns NULL if the requested amount of memory could not be allocated.
+ */
+void *serf_bucket_mem_calloc(
+ serf_bucket_alloc_t *allocator,
+ apr_size_t size);
+
+/**
+ * Free the memory at @a block, returning it to @a allocator.
+ */
+void serf_bucket_mem_free(
+ serf_bucket_alloc_t *allocator,
+ void *block);
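
As an illustration of how these helpers fit together, here is a minimal
sketch (not taken from the serf sources) of a bucket constructor: the
private context is allocated with serf_bucket_mem_alloc() and handed to
serf_bucket_create(); a vtable whose destroy slot is
serf_default_destroy_and_data() would then release the bucket and the
context in one step. The names my_bucket_type and my_context_t are
hypothetical.

    typedef struct {
        const char *data;        /* remaining data to serve */
        apr_size_t remaining;
    } my_context_t;

    /* Hypothetical vtable, defined elsewhere; it can plug in the
       serf_default_* helpers declared in this header. */
    extern const serf_bucket_type_t my_bucket_type;

    serf_bucket_t *my_bucket_create(const char *data, apr_size_t len,
                                    serf_bucket_alloc_t *allocator)
    {
        my_context_t *ctx = serf_bucket_mem_alloc(allocator, sizeof(*ctx));
        ctx->data = data;
        ctx->remaining = len;

        /* Ties the private context to the new bucket; pairing this with
           serf_default_destroy_and_data() frees both together. */
        return serf_bucket_create(&my_bucket_type, allocator, ctx);
    }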
+
+
+/**
+ * Analogous to apr_pstrmemdup, using a bucket allocator instead.
+ */
+char *serf_bstrmemdup(
+ serf_bucket_alloc_t *allocator,
+ const char *str,
+ apr_size_t size);
+
+/**
+ * Analogous to apr_pmemdup, using a bucket allocator instead.
+ */
+void * serf_bmemdup(
+ serf_bucket_alloc_t *allocator,
+ const void *mem,
+ apr_size_t size);
+
+/**
+ * Analogous to apr_pstrdup, using a bucket allocator instead.
+ */
+char * serf_bstrdup(
+ serf_bucket_alloc_t *allocator,
+ const char *str);
+
+
+/**
+ * Read data up to a newline.
+ *
+ * @a acceptable contains the allowed forms of a newline, and @a found
+ * will return the particular newline type that was found. If a newline
+ * is not found, then SERF_NEWLINE_NONE will be placed in @a found.
+ *
+ * @a data should contain a pointer to the data to be scanned. @a len
+ * should specify the length of that data buffer. On exit, @a data will
+ * be advanced past the newline, and @a len will specify the remaining
+ * amount of data in the buffer.
+ *
+ * Given this pattern of behavior, the caller should store the initial
+ * value of @a data as the line start. The difference between the
+ * returned value of @a data and the saved start is the length of the
+ * line.
+ *
+ * Note that the newline character(s) will remain within the buffer.
+ * This function scans at a byte level for the newline characters. Thus,
+ * the data buffer may contain NUL characters. As a corollary, this
+ * function only works on 8-bit character encodings.
+ *
+ * If the data is fully consumed (@a len gets set to zero) and a CR
+ * character is found at the end and the CRLF sequence is allowed, then
+ * this function may store SERF_NEWLINE_CRLF_SPLIT into @a found. The
+ * caller should take particular consideration for the CRLF sequence
+ * that may be split across data buffer boundaries.
+ */
+void serf_util_readline(
+ const char **data,
+ apr_size_t *len,
+ int acceptable,
+ int *found);
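
A short usage sketch of the pattern described above (not part of this
header; it assumes the SERF_NEWLINE_ANY and SERF_NEWLINE_NONE constants
declared in serf.h):

    #include <stdio.h>
    #include "serf_bucket_util.h"

    static void print_first_line(const char *buf, apr_size_t buflen)
    {
        const char *data = buf;   /* advanced past the newline on return */
        apr_size_t len = buflen;  /* reduced by the bytes consumed */
        int found;

        serf_util_readline(&data, &len, SERF_NEWLINE_ANY, &found);
        if (found != SERF_NEWLINE_NONE) {
            /* The newline bytes are still part of the line. */
            int line_len = (int)(data - buf);
            printf("first line (%d bytes): %.*s", line_len, line_len, buf);
        }
    }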
+
+
+/** The buffer size used within @see serf_databuf_t. */
+#define SERF_DATABUF_BUFSIZE 8000
+
+/** Callback function which is used to refill the data buffer.
+ *
+ * The function takes @a baton, which is the @see read_baton value
+ * from the serf_databuf_t structure. Data should be placed into
+ * a buffer specified by @a buf, which is @a bufsize bytes long.
+ * The amount of data read should be returned in @a len.
+ *
+ * APR_EOF should be returned if no more data is available. If reading
+ * would block, APR_EAGAIN should be returned instead of blocking. In both
+ * cases, @a buf should be filled in and @a len set, as appropriate.
+ */
+typedef apr_status_t (*serf_databuf_reader_t)(
+ void *baton,
+ apr_size_t bufsize,
+ char *buf,
+ apr_size_t *len);
+
+/**
+ * This structure is used as an intermediate data buffer for some "external"
+ * source of data. It works as a scratch pad area for incoming data to be
+ * stored, and then returned as a ptr/len pair by the bucket read functions.
+ *
+ * This structure should be initialized by calling @see serf_databuf_init.
+ * Users should not bother to zero the structure beforehand.
+ */
+typedef struct {
+ /** The current data position within the buffer. */
+ const char *current;
+
+ /** Amount of data remaining in the buffer. */
+ apr_size_t remaining;
+
+ /** Callback function. */
+ serf_databuf_reader_t read;
+
+ /** A baton to hold context-specific data. */
+ void *read_baton;
+
+ /** Records the status from the last @see read operation. */
+ apr_status_t status;
+
+ /** Holds the data until it can be returned. */
+ char buf[SERF_DATABUF_BUFSIZE];
+
+} serf_databuf_t;
+
+/**
+ * Initialize the @see serf_databuf_t structure specified by @a databuf.
+ */
+void serf_databuf_init(
+ serf_databuf_t *databuf);
+
+/**
+ * Implement a bucket-style read function from the @see serf_databuf_t
+ * structure given by @a databuf.
+ *
+ * The @a requested, @a data, and @a len fields are interpreted and used
+ * as in the read function of @see serf_bucket_t.
+ */
+apr_status_t serf_databuf_read(
+ serf_databuf_t *databuf,
+ apr_size_t requested,
+ const char **data,
+ apr_size_t *len);
+
+/**
+ * Implement a bucket-style readline function from the @see serf_databuf_t
+ * structure given by @a databuf.
+ *
+ * The @a acceptable, @a found, @a data, and @a len fields are interpreted
+ * and used as in the read function of @see serf_bucket_t.
+ */
+apr_status_t serf_databuf_readline(
+ serf_databuf_t *databuf,
+ int acceptable,
+ int *found,
+ const char **data,
+ apr_size_t *len);
+
+/**
+ * Implement a bucket-style peek function from the @see serf_databuf_t
+ * structure given by @a databuf.
+ *
+ * The @a data, and @a len fields are interpreted and used as in the
+ * peek function of @see serf_bucket_t.
+ */
+apr_status_t serf_databuf_peek(
+ serf_databuf_t *databuf,
+ const char **data,
+ apr_size_t *len);
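
Putting the databuf pieces together: a bucket implementation typically
embeds a serf_databuf_t in its private context, points the read callback
at its data source, and forwards its read/readline/peek vtable entries to
the serf_databuf_* functions above. A hedged sketch follows; my_ctx_t and
file_reader are illustrative names, not part of the serf API.

    #include <apr_file_io.h>
    #include "serf_bucket_util.h"

    typedef struct {
        apr_file_t *file;
        serf_databuf_t databuf;
    } my_ctx_t;

    /* Refills the databuf from the underlying APR file. */
    static apr_status_t file_reader(void *baton, apr_size_t bufsize,
                                    char *buf, apr_size_t *len)
    {
        my_ctx_t *ctx = baton;

        *len = bufsize;
        return apr_file_read(ctx->file, buf, len);
    }

    static void my_ctx_init(my_ctx_t *ctx, apr_file_t *file)
    {
        ctx->file = file;
        serf_databuf_init(&ctx->databuf);
        ctx->databuf.read = file_reader;
        ctx->databuf.read_baton = ctx;
        /* The bucket's read function can now simply call
           serf_databuf_read(&ctx->databuf, requested, data, len). */
    }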
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !SERF_BUCKET_UTIL_H */
diff --git a/serf_private.h b/serf_private.h
new file mode 100644
index 0000000..59e4db7
--- /dev/null
+++ b/serf_private.h
@@ -0,0 +1,353 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SERF_PRIVATE_H_
+#define _SERF_PRIVATE_H_
+
+/* ### what the hell? why does the APR interface have a "size" ??
+ ### the implication is that, if we bust this limit, we'd need to
+ ### stop, rebuild a pollset, and repopulate it. what suckage. */
+#define MAX_CONN 16
+
+/* Windows does not define IOV_MAX, so we need to ensure it is defined. */
+#ifndef IOV_MAX
+#define IOV_MAX 16
+#endif
+
+#define SERF_IO_CLIENT (1)
+#define SERF_IO_CONN (2)
+#define SERF_IO_LISTENER (3)
+
+typedef struct serf__authn_scheme_t serf__authn_scheme_t;
+
+typedef struct serf_io_baton_t {
+ int type;
+ union {
+ serf_incoming_t *client;
+ serf_connection_t *conn;
+ serf_listener_t *listener;
+ } u;
+} serf_io_baton_t;
+
+/* Holds all the information corresponding to a request/response pair. */
+struct serf_request_t {
+ serf_connection_t *conn;
+
+ apr_pool_t *respool;
+ serf_bucket_alloc_t *allocator;
+
+ /* The bucket corresponding to the request. Will be NULL once the
+ * bucket has been emptied (for delivery into the socket).
+ */
+ serf_bucket_t *req_bkt;
+
+ serf_request_setup_t setup;
+ void *setup_baton;
+
+ serf_response_acceptor_t acceptor;
+ void *acceptor_baton;
+
+ serf_response_handler_t handler;
+ void *handler_baton;
+
+ serf_bucket_t *resp_bkt;
+
+ int written;
+ int priority;
+
+ struct serf_request_t *next;
+};
+
+typedef struct serf_pollset_t {
+ /* the set of connections to poll */
+ apr_pollset_t *pollset;
+} serf_pollset_t;
+
+typedef struct serf__authn_info_t {
+ const char *realm;
+
+ const serf__authn_scheme_t *scheme;
+
+ void *baton;
+} serf__authn_info_t;
+
+struct serf_context_t {
+ /* the pool used for self and for other allocations */
+ apr_pool_t *pool;
+
+ void *pollset_baton;
+ serf_socket_add_t pollset_add;
+ serf_socket_remove_t pollset_rm;
+
+ /* one of our connections has a dirty pollset state. */
+ int dirty_pollset;
+
+ /* the list of active connections */
+ apr_array_header_t *conns;
+#define GET_CONN(ctx, i) (((serf_connection_t **)(ctx)->conns->elts)[i])
+
+ /* Proxy server address */
+ apr_sockaddr_t *proxy_address;
+
+ /* Progress callback */
+ serf_progress_t progress_func;
+ void *progress_baton;
+ apr_off_t progress_read;
+ apr_off_t progress_written;
+
+ /* authentication info for this context, shared by all connections. */
+ serf__authn_info_t authn_info;
+ serf__authn_info_t proxy_authn_info;
+
+ /* List of authn types supported by the client.*/
+ int authn_types;
+ /* Callback function used to get credentials for a realm. */
+ serf_credentials_callback_t cred_cb;
+};
+
+struct serf_listener_t {
+ serf_context_t *ctx;
+ serf_io_baton_t baton;
+ apr_socket_t *skt;
+ apr_pool_t *pool;
+ apr_pollfd_t desc;
+ void *accept_baton;
+ serf_accept_client_t accept_func;
+};
+
+struct serf_incoming_t {
+ serf_context_t *ctx;
+ serf_io_baton_t baton;
+ void *request_baton;
+ serf_incoming_request_cb_t request;
+ apr_socket_t *skt;
+ apr_pollfd_t desc;
+};
+
+/* States for the different stages in the lifecycle of a connection. */
+typedef enum {
+ SERF_CONN_INIT, /* no socket created yet */
+ SERF_CONN_SETUP_SSLTUNNEL, /* ssl tunnel being setup, no requests sent */
+ SERF_CONN_CONNECTED, /* conn is ready to send requests */
+ SERF_CONN_CLOSING, /* conn is closing, no more requests,
+ start a new socket */
+} serf__connection_state_t;
+
+struct serf_connection_t {
+ serf_context_t *ctx;
+
+ apr_status_t status;
+ serf_io_baton_t baton;
+
+ apr_pool_t *pool;
+ serf_bucket_alloc_t *allocator;
+
+ apr_sockaddr_t *address;
+
+ apr_socket_t *skt;
+ apr_pool_t *skt_pool;
+
+ /* the last reqevents we gave to pollset_add */
+ apr_int16_t reqevents;
+
+ /* the events we've seen for this connection in our returned pollset */
+ apr_int16_t seen_in_pollset;
+
+ /* are we a dirty connection that needs its poll status updated? */
+ int dirty_conn;
+
+ /* number of completed requests we've sent */
+ unsigned int completed_requests;
+
+ /* number of completed responses we've got */
+ unsigned int completed_responses;
+
+ /* keepalive */
+ unsigned int probable_keepalive_limit;
+
+ /* Current state of the connection (whether or not it is connected). */
+ serf__connection_state_t state;
+
+ /* This connection may have responses without a request! */
+ int async_responses;
+ serf_bucket_t *current_async_response;
+ serf_response_acceptor_t async_acceptor;
+ void *async_acceptor_baton;
+ serf_response_handler_t async_handler;
+ void *async_handler_baton;
+
+ /* A bucket wrapped around our socket (for reading responses). */
+ serf_bucket_t *stream;
+ /* A reference to the aggregate bucket that provides the boundary between
+ * request level buckets and connection level buckets.
+ */
+ serf_bucket_t *ostream_head;
+ serf_bucket_t *ostream_tail;
+
+ /* Aggregate bucket used to send the CONNECT request. */
+ serf_bucket_t *ssltunnel_ostream;
+
+ /* The list of active requests. */
+ serf_request_t *requests;
+ serf_request_t *requests_tail;
+
+ /* The list of requests we're holding on to because we're going to
+ * reset the connection soon.
+ */
+ serf_request_t *hold_requests;
+ serf_request_t *hold_requests_tail;
+
+ struct iovec vec[IOV_MAX];
+ int vec_len;
+
+ serf_connection_setup_t setup;
+ void *setup_baton;
+ serf_connection_closed_t closed;
+ void *closed_baton;
+
+ /* Max. number of outstanding requests. */
+ unsigned int max_outstanding_requests;
+
+ int hit_eof;
+ /* Host info. */
+ const char *host_url;
+ apr_uri_t host_info;
+
+ /* connection and authentication scheme specific information */
+ void *authn_baton;
+ void *proxy_authn_baton;
+};
+
+/*** Authentication handler declarations ***/
+
+/**
+ * For each authentication scheme we need a handler function of type
+ * serf__auth_handler_func_t. This function will be called when an
+ * authentication challenge is received in a session.
+ */
+typedef apr_status_t
+(*serf__auth_handler_func_t)(int code,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ const char *auth_hdr,
+ const char *auth_attr,
+ void *baton,
+ apr_pool_t *pool);
+
+/**
+ * For each authentication scheme we need an initialization function of type
+ * serf__init_context_func_t. This function will be called the first time
+ * serf tries a specific authentication scheme handler.
+ */
+typedef apr_status_t
+(*serf__init_context_func_t)(int code,
+ serf_context_t *conn,
+ apr_pool_t *pool);
+
+/**
+ * For each authentication scheme we need an initialization function of type
+ * serf__init_conn_func_t. This function will be called when a new
+ * connection is opened.
+ */
+typedef apr_status_t
+(*serf__init_conn_func_t)(int code,
+ serf_connection_t *conn,
+ apr_pool_t *pool);
+
+/**
+ * For each authentication scheme we need a setup_request function of type
+ * serf__setup_request_func_t. This function will be called when a
+ * new serf_request_t object is created and should fill in the correct
+ * authentication headers (if needed).
+ */
+typedef apr_status_t
+(*serf__setup_request_func_t)(int code,
+ serf_connection_t *conn,
+ const char *method,
+ const char *uri,
+ serf_bucket_t *hdrs_bkt);
+
+/**
+ * This function will be called when a response is received, so that the
+ * scheme handler can validate the Authentication related response headers
+ * (if needed).
+ */
+typedef apr_status_t
+(*serf__validate_response_func_t)(int code,
+ serf_connection_t *conn,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ apr_pool_t *pool);
+
+/**
+ * serf__authn_scheme_t: vtable for an authn scheme provider.
+ */
+struct serf__authn_scheme_t {
+ /* The http status code that's handled by this authentication scheme.
+ Normal values are 401 for server authentication and 407 for proxy
+ authentication */
+ int code;
+
+ /* The name of this authentication scheme. This should be a case
+ sensitive match of the string sent in the HTTP authentication header. */
+ const char *name;
+
+ /* Internal code used for this authn type. */
+ int type;
+
+ /* The context initialization function if any; otherwise, NULL */
+ serf__init_context_func_t init_ctx_func;
+
+ /* The connection initialization function if any; otherwise, NULL */
+ serf__init_conn_func_t init_conn_func;
+
+ /* The authentication handler function */
+ serf__auth_handler_func_t handle_func;
+
+ /* Function to set up the authentication header of a request */
+ serf__setup_request_func_t setup_request_func;
+
+ /* Function to validate the authentication header of a response */
+ serf__validate_response_func_t validate_response_func;
+};
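
To make the vtable concrete, a scheme provider fills one of these
structures with its callbacks. A hedged sketch for a Basic-like scheme
follows; the function names are hypothetical placeholders for the
provider's implementations, and SERF_AUTHN_BASIC is assumed to be the
matching type flag from serf.h.

    static const serf__authn_scheme_t example_basic_scheme = {
        401,                        /* handles server (not proxy) challenges */
        "Basic",                    /* matched against the challenge header */
        SERF_AUTHN_BASIC,           /* internal authn type flag */
        init_basic_context,         /* serf__init_context_func_t */
        init_basic_connection,      /* serf__init_conn_func_t */
        handle_basic_auth,          /* serf__auth_handler_func_t */
        setup_request_basic_auth,   /* serf__setup_request_func_t */
        validate_basic_response     /* serf__validate_response_func_t */
    };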
+
+/**
+ * Handles a 401 or 407 response, tries the different available authentication
+ * handlers.
+ */
+apr_status_t serf__handle_auth_response(int *consumed_response,
+ serf_request_t *request,
+ serf_bucket_t *response,
+ void *baton,
+ apr_pool_t *pool);
+
+/* from context.c */
+void serf__context_progress_delta(void *progress_baton, apr_off_t read,
+ apr_off_t written);
+
+/* from incoming.c */
+apr_status_t serf__process_client(serf_incoming_t *l, apr_int16_t events);
+apr_status_t serf__process_listener(serf_listener_t *l);
+
+/* from outgoing.c */
+apr_status_t serf__open_connections(serf_context_t *ctx);
+apr_status_t serf__process_connection(serf_connection_t *conn,
+ apr_int16_t events);
+apr_status_t serf__conn_update_pollset(serf_connection_t *conn);
+
+/* from ssltunnel.c */
+apr_status_t serf__ssltunnel_connect(serf_connection_t *conn);
+
+#endif
diff --git a/serfmake b/serfmake
new file mode 100755
index 0000000..3ced654
--- /dev/null
+++ b/serfmake
@@ -0,0 +1,485 @@
+#!/usr/bin/env python
+
+import os
+import re
+import shutil
+import sys
+import stat
+import copy
+
+### use get_version() ?
+MAJOR = 1
+
+# Basic defines for our outputs.
+LIBNAME = 'libserf-%d' % (MAJOR,)
+INCLUDES = 'serf-%d' % (MAJOR,)
+PCFILE = 'serf-%d' % (MAJOR,)
+
+
+FILES_HDR = [
+ ('.', 'serf'),
+ ('.', 'serf_bucket_types'),
+ ('.', 'serf_bucket_util'),
+ ]
+
+LIB_FILES = [
+ ('.', 'context'),
+ ('.', 'incoming'),
+ ('.', 'outgoing'),
+ ('.', 'ssltunnel'),
+
+ ('buckets', 'aggregate_buckets'),
+ ('buckets', 'request_buckets'),
+ ('buckets', 'buckets'),
+ ('buckets', 'simple_buckets'),
+ ('buckets', 'file_buckets'),
+ ('buckets', 'mmap_buckets'),
+ ('buckets', 'socket_buckets'),
+ ('buckets', 'response_buckets'),
+ ('buckets', 'headers_buckets'),
+ ('buckets', 'allocator'),
+ ('buckets', 'dechunk_buckets'),
+ ('buckets', 'deflate_buckets'),
+ ('buckets', 'limit_buckets'),
+ ('buckets', 'ssl_buckets'),
+ ('buckets', 'barrier_buckets'),
+ ('buckets', 'chunk_buckets'),
+ ('buckets', 'iovec_buckets'),
+ ('auth', 'auth'),
+ ('auth', 'auth_basic'),
+ ('auth', 'auth_digest'),
+ ('auth', 'auth_kerb'),
+ ('auth', 'auth_kerb_gss'),
+ ]
+
+TEST_DEPS = [
+ ('test', 'CuTest'),
+ ('test', 'test_util'),
+ ('test', 'test_context'),
+ ('test', 'test_buckets'),
+ ('test', 'test_ssl'),
+ ('test/server', 'test_server'),
+ ]
+
+TEST_HDR_FILES = [
+ ('test', 'CuTest'),
+ ('test', 'test_serf'),
+ ]
+
+TEST_FILES = [
+ ('test', 'serf_get'),
+ ('test', 'serf_response'),
+ ('test', 'serf_request'),
+ ('test', 'serf_spider'),
+ ('test', 'test_all'),
+ ]
+
+TESTCASES = [
+ ('test/testcases', 'simple.response'),
+ ('test/testcases', 'chunked-empty.response'),
+ ('test/testcases', 'chunked.response'),
+ ('test/testcases', 'chunked-trailers.response'),
+ ('test/testcases', 'deflate.response'),
+ ]
+
+
+def main(argv):
+ params = {}
+
+ commands = []
+
+ for arg in argv[1:]:
+ idx = arg.find('=')
+ if idx > 0:
+ start = arg.rfind('-', 0, idx)
+ if start > 0:
+ params[arg[start+1:idx]] = arg[idx+1:].strip()
+ else:
+ func = globals().get('cmd_' + arg)
+ if func:
+ commands.append(func)
+ else:
+ print('ERROR: unknown argument: ' + arg)
+ usage()
+
+ if not commands:
+ usage()
+
+ for func in commands:
+ try:
+ func(params)
+ except:
+ print('ERROR: exception:')
+ print(sys.exc_info()[1])
+ print("")
+ usage()
+
+
+def usage():
+ ### print something
+ print('serfmake [cmd] [options]')
+ print('Commands:')
+ print('\tbuild\tBuilds (default)')
+ print('\tcheck\tRuns test cases')
+ print('\tinstall\tInstalls serf into PREFIX')
+ print('\tclean\tCleans')
+ print('Options:')
+ print('\t--with-apr=PATH\tprefix for installed APR and APR-util')
+ print('\t\t\t(needs apr-1-config and apu-1-config; will look in PATH)')
+ print('\t--prefix=PATH\tinstall serf into PATH (default: /usr/local)')
+ print('Quick guide:')
+ print('\tserfmake --prefix=/usr/local/serf --with-apr=/usr/local/apr install')
+ sys.exit(1)
+
+
+def cmd_build(param):
+ builder = Builder(param)
+ builder.build_target(File('.', LIBNAME, 'la'), False)
+ builder.build_target(File('.', PCFILE, 'pc'), False)
+
+
+def cmd_install(param):
+ builder = Builder(param)
+ ### should be called .install_all()
+ builder.install_target(File('.', LIBNAME, 'la'), False)
+
+
+def cmd_check(param):
+ builder = Builder(param)
+ for dirpath, fname in TEST_FILES:
+ builder.build_target(File(dirpath, fname, None), False)
+
+ for dirpath, fname in TESTCASES:
+ case = os.path.join(dirpath, fname)
+ print('== Testing %s ==' % case)
+ result = os.system('%s %s' % (os.path.join('test', 'serf_response'), case))
+ if result:
+ raise TestError("", result)
+
+ # run the test suite based on the CuTest framework
+ result = os.system(os.path.join('test', 'test_all'))
+ if result:
+ raise TestError(case, result)
+
+def cmd_clean(param):
+ targets = [File(dirpath, fname, 'o') for dirpath, fname in LIB_FILES]
+ targets += [File(dirpath, fname, 'lo') for dirpath, fname in LIB_FILES]
+ targets += [File('.', LIBNAME, 'la'),
+ File('.', PCFILE, 'pc'),
+ ]
+ targets += [File(dirpath, fname, 'o') for dirpath, fname in TEST_FILES]
+ targets += [File(dirpath, fname, 'lo') for dirpath, fname in TEST_FILES]
+ targets += [File(dirpath, fname, None) for dirpath, fname in TEST_FILES]
+ targets += [File(dirpath, fname, 'o') for dirpath, fname in TEST_DEPS]
+ targets += [File(dirpath, fname, 'lo') for dirpath, fname in TEST_DEPS]
+
+ clean = [file for file in targets if file.mtime]
+ if clean:
+ sys.stdout.write('Cleaning %d files... ' % len(clean))
+ for i in clean:
+ if i.mtime:
+ os.remove(i.fname)
+ print('done.')
+ else:
+ print('Clean.')
+
+
+class Builder(object):
+ def __init__(self, params):
+ if 'apr' in params:
+ self.apr = APRConfig(params['apr'])
+ self.apu = APUConfig(params['apr'])
+ else:
+ self.apr = APRConfig(None)
+ self.apu = APUConfig(None)
+
+ try:
+ self.prefix = params['prefix']
+ except:
+ self.prefix = '/usr/local'
+
+ ### no way to tweak these
+ self.libdir = os.path.join(self.prefix, 'lib')
+ self.pkgconfigdir = os.path.join(self.prefix, 'lib', 'pkgconfig')
+ self.includedir = os.path.join(self.prefix, 'include', INCLUDES)
+
+ self.load_vars()
+ self.load_deps()
+
+ def load_vars(self):
+ self.CC = self.apr.get_value('CC', '--cc')
+ self.CFLAGS = self.apr.get_value('CFLAGS', '--cflags')
+ self.CPPFLAGS = self.apr.get_value('CPPFLAGS', '--cppflags')
+ self.LIBTOOL = self.apr.get_value('LIBTOOL', '--apr-libtool')
+ self.LDFLAGS = self.apr.get_value('LDFLAGS', '--ldflags') \
+ + ' ' + self.apu.get_value('LDFLAGS', '--ldflags')
+
+ self.INCLUDES = '-I%s -I%s -I%s' % (
+ '.',
+ self.apr.get_value(None, '--includedir'),
+ self.apu.get_value(None, '--includedir'),
+ )
+ if os.getenv('EXTRA_INCLUDES'):
+ self.INCLUDES += ' -I' + os.getenv('EXTRA_INCLUDES')
+
+ self.LIBS = self.apu.get_value(None, '--link-libtool') \
+ + ' ' + self.apu.get_value(None, '--libs') \
+ + ' ' + self.apr.get_value(None, '--link-libtool') \
+ + ' ' + self.apr.get_value(None, '--libs') \
+ + ' -lz'
+ self.SSL_LIBS = '-lssl -lcrypto'
+
+ self.MODE = 644
+
+ def load_deps(self):
+ self.deps = { }
+
+ hdrs = [File(dirpath, fname, 'h') for dirpath, fname in FILES_HDR]
+ libfiles = [File(dirpath, fname, 'c') for dirpath, fname in LIB_FILES]
+ libobjs = [File(dirpath, fname, 'lo') for dirpath, fname in LIB_FILES]
+ for src, obj in zip(libfiles, libobjs):
+ self._add_compile(src, obj, hdrs)
+
+ self.hdrs = hdrs
+
+ all_libs = self.LIBS + ' ' + self.SSL_LIBS
+
+ lib = File('.', LIBNAME, 'la')
+ cmd = '%s --silent --mode=link %s %s -rpath %s -o %s %s %s' % (
+ self.LIBTOOL, self.CC, self.LDFLAGS, self.libdir,
+ lib.fname, ' '.join([l.fname for l in libobjs]), all_libs)
+ self._add_dep(lib, libobjs, cmd)
+
+ # load the test program dependencies now
+ testhdrs = copy.deepcopy(hdrs)
+ testhdrs += [File(dirpath, fname, 'h') for dirpath, fname in TEST_HDR_FILES]
+ testdeps = [File(dirpath, fname, 'c') for dirpath, fname in TEST_DEPS]
+ testobjs = [File(dirpath, fname, 'lo') for dirpath, fname in TEST_DEPS]
+
+ for testsrc, testobj in zip(testdeps, testobjs):
+ self._add_compile(testsrc, testobj, testhdrs)
+
+ for dirpath, fname in TEST_FILES:
+ src = File(dirpath, fname, 'c')
+ obj = File(dirpath, fname, 'lo')
+ prog = File(dirpath, fname, None)
+
+ self._add_compile(src, obj, hdrs)
+
+ # test_all requires extra dependencies
+ if fname == "test_all":
+ cmd = '%s --silent --mode=link %s %s -static -o %s %s %s %s' % (
+ self.LIBTOOL, self.CC, self.LDFLAGS,
+ prog.fname, lib.fname, ' '.join([l.fname for l in [obj] + testobjs]),
+ all_libs)
+ self._add_dep(prog, [lib, obj] + testobjs, cmd)
+ else:
+ cmd = '%s --silent --mode=link %s %s -static -o %s %s %s %s' % (
+ self.LIBTOOL, self.CC, self.LDFLAGS,
+ prog.fname, lib.fname, obj.fname, all_libs)
+ self._add_dep(prog, [lib, obj], cmd)
+
+ # create 'serf-1.pc' if it doesn't exist.
+ pcfile = File('.', PCFILE, 'pc')
+ self._add_dep(pcfile, [], self._write_pcfile)
+
+ def _add_compile(self, src, obj, hdrs):
+ cmd = '%s --silent --mode=compile %s %s %s %s -c -o %s %s' % (
+ self.LIBTOOL, self.CC, self.CFLAGS, self.CPPFLAGS, self.INCLUDES,
+ obj.fname, src.fname)
+ self._add_dep(obj, [src] + hdrs, cmd)
+
+ def _add_dep(self, target, deps, cmd):
+ if target.mtime:
+ for dep in deps:
+ if dep in self.deps or (dep.mtime and dep.mtime > target.mtime):
+ # a dep is newer. this needs to be rebuilt.
+ break
+ else:
+ # this is up to date. don't add it to the deps[] structure.
+ return
+ # else non-existent, so it must be rebuilt.
+
+ # Commands that are strings are cmdline invocations. Otherwise, it
+ # should be a callable.
+ if isinstance(cmd, str):
+ cmd = CommandLine(cmd)
+
+ # register the dependency so this will get built
+ self.deps[target] = deps, cmd
+
+ def _write_pcfile(self):
+ """Generating serf-1.pc ..."""
+
+ open(PCFILE + '.pc', 'w').write(
+"""SERF_MAJOR_VERSION=%d
+prefix=%s
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include/%s
+
+Name: serf
+Description: HTTP client library
+Version: %s
+Requires.private: libssl libcrypto
+Libs: -L${libdir} -lserf-${SERF_MAJOR_VERSION}
+Libs.private: %s
+Cflags: -I${includedir}
+""" % (MAJOR, self.prefix, INCLUDES, get_version(), self.LIBS))
+
+ def build_target(self, target, dry_run):
+ deps, cmd = self.deps.get(target, (None, None))
+ if cmd is None:
+ # it's already up to date. all done.
+ return
+
+ for f in deps:
+ subdep = self.deps.get(f)
+ if subdep:
+ self.build_target(f, dry_run)
+
+ # build the target now
+ print(cmd.__doc__)
+ if not dry_run:
+ result = cmd()
+ if result:
+ raise BuildError(cmd.__doc__, result)
+ # FALLTHROUGH
+
+ # it's a dry run. pretend we built the target.
+ del self.deps[target]
+ return 0
+
+ def install_target(self, target, dry_run):
+ self.build_target(target, dry_run)
+
+ # install the target now
+ if not dry_run:
+
+ for path in (self.libdir, self.pkgconfigdir, self.includedir):
+ if not os.path.exists(path):
+ try:
+ os.makedirs(path)
+ except OSError:
+ raise BuildError('os.makedirs',
+ 'can not create install directories')
+
+ for f in self.hdrs:
+ print("Installing: %s" % (os.path.basename(f.fname),))
+ shutil.copy(f.fname, self.includedir)
+
+ print("Installing: %s.pc" % (PCFILE,))
+ shutil.copy(PCFILE + '.pc', self.pkgconfigdir)
+
+ cmd = '%s --silent --mode=install %s -c -m %d %s %s' % (
+ self.LIBTOOL, '/usr/bin/install', self.MODE, target.fname,
+ self.libdir)
+
+ print("Installing: %s" % (os.path.basename(target.fname),))
+ result = os.system(cmd)
+ if result:
+ raise BuildError(cmd, result)
+ # FALLTHROUGH
+
+ return 0
+
+
+class ConfigScript(object):
+ script_name = None
+ locations = [
+ '/usr/bin',
+ '/usr/local/bin',
+ '/usr/local/apache2/bin',
+ ]
+
+ def __init__(self, search_dir):
+ if search_dir:
+ locations = [search_dir, os.path.join(search_dir, 'bin')]
+ else:
+ locations = self.locations
+
+ for dirname in locations:
+ bin = os.path.join(dirname, self.script_name)
+ if os.access(bin, os.X_OK):
+ self.bin = bin
+ break
+ else:
+ raise ConfigScriptNotFound(self.script_name)
+
+ def get_value(self, env_name, switch):
+ if env_name and os.getenv(env_name):
+ return os.getenv(env_name)
+ return os.popen('%s %s' % (self.bin, switch), 'r').read().strip()
+
+
+class APRConfig(ConfigScript):
+ script_name = 'apr-1-config'
+
+
+class APUConfig(ConfigScript):
+ script_name = 'apu-1-config'
+
+
+class CommandLine(object):
+ """Simple helper to invoke a system command when called."""
+
+ def __init__(self, cmd):
+ self.cmd = cmd
+ self.__doc__ = cmd # when we print the execution of this command
+
+ def __call__(self):
+ return os.system(self.cmd)
+
+
+class File:
+ def __init__(self, dirpath, fname, ext):
+ if ext:
+ self.fname = os.path.join(dirpath, fname + '.' + ext)
+ else:
+ self.fname = os.path.join(dirpath, fname)
+
+ try:
+ s = os.stat(self.fname)
+ except OSError:
+ self.mtime = None
+ else:
+ self.mtime = s[stat.ST_MTIME]
+
+ def __eq__(self, other):
+ return self.fname == other.fname
+
+ def __hash__(self):
+ return hash(self.fname)
+
+
+def get_version():
+ match = re.search('SERF_MAJOR_VERSION ([0-9]+).*'
+ 'SERF_MINOR_VERSION ([0-9]+).*'
+ 'SERF_PATCH_VERSION ([0-9]+)',
+ open('serf.h').read(),
+ re.DOTALL)
+ major, minor, patch = match.groups()
+ return '%s.%s.%s' % (major, minor, patch)
+
+
+class BuildError(Exception):
+ "An error occurred while building a target."
+class TestError(Exception):
+ "An error occurred while running a unit test."
+class ConfigScriptNotFound(Exception):
+ def __init__(self, value):
+ self.value = "ERROR: A configuration script was not found: " + value
+ def __str__(self):
+ return self.value
+
+
+if __name__ == '__main__':
+ main(sys.argv)
+
+
+###
+### TODO:
+### * obey DESTDIR
+### * arfrever says LDFLAGS is passed twice
+### * be able to specify libdir and includedir
+###
diff --git a/ssltunnel.c b/ssltunnel.c
new file mode 100644
index 0000000..6803266
--- /dev/null
+++ b/ssltunnel.c
@@ -0,0 +1,168 @@
+/* Copyright 2011 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*** Set up an SSL tunnel over an HTTP proxy, according to RFC 2817. ***/
+
+#include <apr_pools.h>
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "serf_private.h"
+
+
+/* Structure passed around as baton for the CONNECT request and response. */
+typedef struct {
+ apr_pool_t *pool;
+ const char *uri;
+} req_ctx_t;
+
+/* forward declaration. */
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool);
+
+static serf_bucket_t* accept_response(serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ serf_bucket_alloc_t *bkt_alloc;
+#if 0
+ req_ctx_t *ctx = acceptor_baton;
+#endif
+
+ /* get the per-request bucket allocator */
+ bkt_alloc = serf_request_get_alloc(request);
+
+ /* Create a barrier so the response doesn't eat us! */
+ c = serf_bucket_barrier_create(stream, bkt_alloc);
+
+ return serf_bucket_response_create(c, bkt_alloc);
+}
+
+/* If a 200 OK was received for the CONNECT request, consider the connection
+ ready for use. */
+static apr_status_t handle_response(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ serf_status_line sl;
+ req_ctx_t *ctx = handler_baton;
+
+ if (! response) {
+ serf_connection_request_create(request->conn,
+ setup_request,
+ ctx);
+ return APR_SUCCESS;
+ }
+
+ status = serf_bucket_response_status(response, &sl);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+ if (!sl.version && (APR_STATUS_IS_EOF(status) ||
+ APR_STATUS_IS_EAGAIN(status)))
+ {
+ return status;
+ }
+
+ status = serf_bucket_response_wait_for_headers(response);
+ if (status && !APR_STATUS_IS_EOF(status)) {
+ return status;
+ }
+
+ /* Body is supposed to be empty. */
+ if (sl.code == 200) {
+ request->conn->state = SERF_CONN_CONNECTED;
+
+ apr_pool_destroy(ctx->pool);
+ serf_bucket_destroy(request->conn->ssltunnel_ostream);
+ request->conn->stream = NULL;
+ ctx = NULL;
+
+ return APR_EOF;
+ }
+
+ /* Authentication failure and 200 OK have been handled at this point;
+ anything else is an error. */
+ return APR_EGENERAL; /* TODO: better error code */
+}
+
+/* Prepare the CONNECT request. */
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ req_ctx_t *ctx = setup_baton;
+
+ *req_bkt =
+ serf_request_bucket_request_create(request,
+ "CONNECT", ctx->uri,
+ NULL,
+ serf_request_get_alloc(request));
+ *acceptor = accept_response;
+ *acceptor_baton = ctx;
+ *handler = handle_response;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t detect_eof(void *baton, serf_bucket_t *aggregate_bucket)
+{
+ serf_connection_t *conn = baton;
+ conn->hit_eof = 1;
+ return APR_EAGAIN;
+}
+
+/* SSL tunnel is needed, push a CONNECT request on the connection. */
+apr_status_t serf__ssltunnel_connect(serf_connection_t *conn)
+{
+ req_ctx_t *ctx;
+ apr_pool_t *ssltunnel_pool;
+
+ apr_pool_create(&ssltunnel_pool, conn->pool);
+
+ ctx = apr_palloc(ssltunnel_pool, sizeof(*ctx));
+ ctx->pool = ssltunnel_pool;
+ ctx->uri = apr_psprintf(ctx->pool, "%s:%d", conn->host_info.hostinfo,
+ conn->host_info.port);
+
+ conn->ssltunnel_ostream = serf__bucket_stream_create(conn->allocator,
+ detect_eof,
+ conn);
+
+ /* TODO: should be the first request on the connection. */
+ serf_connection_priority_request_create(conn,
+ setup_request,
+ ctx);
+
+ conn->state = SERF_CONN_SETUP_SSLTUNNEL;
+
+ return APR_SUCCESS;
+}
diff --git a/test/CuTest-README.txt b/test/CuTest-README.txt
new file mode 100644
index 0000000..2e90234
--- /dev/null
+++ b/test/CuTest-README.txt
@@ -0,0 +1,225 @@
+Originally obtained from "http://cutest.sourceforge.net/" version 1.4.
+
+HOW TO USE
+
+You can use CuTest to create unit tests to drive your development
+in the style of Extreme Programming. You can also add unit tests to
+existing code to ensure that it works as you suspect.
+
+Your unit tests are an investment. They let you change your
+code and add new features confidently without worrying about
+accidentally breaking earlier features.
+
+
+LICENSING
+
+Copyright (c) 2003 Asim Jalis
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+1. The origin of this software must not be misrepresented; you must not
+claim that you wrote the original software. If you use this software in
+a product, an acknowledgment in the product documentation would be
+appreciated but is not required.
+
+2. Altered source versions must be plainly marked as such, and must not
+be misrepresented as being the original software.
+
+3. This notice may not be removed or altered from any source
+distribution.
+
+
+GETTING STARTED
+
+To add unit testing to your C code the only files you need are
+CuTest.c and CuTest.h.
+
+CuTestTest.c and AllTests.c have been included to provide an
+example of how to write unit tests and then how to aggregate them
+into suites and into a single AllTests.c file. Suites allow you
+to put group tests into logical sets. AllTests.c combines all the
+suites and runs them.
+
+You should not have to look inside CuTest.c. Looking in
+CuTestTest.c and AllTests.c (for example usage) should be
+sufficient.
+
+After downloading the sources, run your compiler to create an
+executable called AllTests.exe. For example, if you are using
+Windows with the cl.exe compiler you would type:
+
+ cl.exe AllTests.c CuTest.c CuTestTest.c
+ AllTests.exe
+
+This will run all the unit tests associated with CuTest and print
+the output on the console. You can replace cl.exe with gcc or
+your favorite compiler in the command above.
+
+
+DETAILED EXAMPLE
+
+Here is a more detailed example. We will work through a simple
+test-first exercise. The goal is to create a library of string
+utilities. First, let's write a function that converts a
+null-terminated string to all upper case.
+
+Ensure that CuTest.c and CuTest.h are accessible from your C
+project. Next, create a file called StrUtil.c with these
+contents:
+
+ #include "CuTest.h"
+
+ char* StrToUpper(char* str) {
+ return str;
+ }
+
+ void TestStrToUpper(CuTest *tc) {
+ char* input = strdup("hello world");
+ char* actual = StrToUpper(input);
+ char* expected = "HELLO WORLD";
+ CuAssertStrEquals(tc, expected, actual);
+ }
+
+ CuSuite* StrUtilGetSuite() {
+ CuSuite* suite = CuSuiteNew();
+ SUITE_ADD_TEST(suite, TestStrToUpper);
+ return suite;
+ }
+
+Create another file called AllTests.c with these contents:
+
+ #include "CuTest.h"
+
+ CuSuite* StrUtilGetSuite();
+
+ void RunAllTests(void) {
+ CuString *output = CuStringNew();
+ CuSuite* suite = CuSuiteNew();
+
+ CuSuiteAddSuite(suite, StrUtilGetSuite());
+
+ CuSuiteRun(suite);
+ CuSuiteSummary(suite, output);
+ CuSuiteDetails(suite, output);
+ printf("%s\n", output->buffer);
+ }
+
+ int main(void) {
+ RunAllTests();
+ }
+
+Then type this on the command line:
+
+ gcc AllTests.c CuTest.c StrUtil.c
+
+to compile. You can replace gcc with your favorite compiler.
+CuTest should be portable enough to handle all Windows and Unix
+compilers. Then to run the tests type:
+
+ a.out
+
+This will print an error because we haven't implemented the
+StrToUpper function correctly. We are just returning the string
+without changing it to upper case.
+
+ char* StrToUpper(char* str) {
+ return str;
+ }
+
+Rewrite this as follows:
+
+ char* StrToUpper(char* str) {
+ char* p;
+ for (p = str ; *p ; ++p) *p = toupper(*p);
+ return str;
+ }
+
+Recompile and run the tests again. The test should pass this
+time.
+
+
+WHAT TO DO NEXT
+
+At this point you might want to write more tests for the
+StrToUpper function. Here are some ideas:
+
+TestStrToUpper_EmptyString : pass in ""
+TestStrToUpper_UpperCase : pass in "HELLO WORLD"
+TestStrToUpper_MixedCase : pass in "HELLO world"
+TestStrToUpper_Numbers : pass in "1234 hello"
+
+As you write each of these tests, add it to the StrUtilGetSuite
+function. If you don't, the tests won't be run. Later, as you write
+other functions and write tests for them be sure to include those
+in StrUtilGetSuite also. The StrUtilGetSuite function should
+include all the tests in StrUtil.c
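
For example, the mixed-case test from the list above could look like the
following sketch (it assumes the corrected StrToUpper and follows the
same pattern as TestStrToUpper):

    void TestStrToUpper_MixedCase(CuTest *tc) {
        char* input = strdup("HELLO world");
        char* actual = StrToUpper(input);
        char* expected = "HELLO WORLD";
        CuAssertStrEquals(tc, expected, actual);
    }

    /* ...and register it in StrUtilGetSuite(): */
    SUITE_ADD_TEST(suite, TestStrToUpper_MixedCase);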
+
+Over time you will create another file called FunkyStuff.c
+containing other functions unrelated to StrUtil. Follow the same
+pattern. Create a FunkyStuffGetSuite function in FunkyStuff.c.
+And add FunkyStuffGetSuite to AllTests.c.
+
+The framework is designed this way so that it is easy to organize
+a large number of tests.
+
+THE BIG PICTURE
+
+Each individual test corresponds to a CuTest. These are grouped
+to form a CuSuite. CuSuites can hold CuTests or other CuSuites.
+AllTests.c collects all the CuSuites in the program into a single
+CuSuite, which it then runs.
+
+The project is open source so feel free to take a peek under the
+hood at the CuTest.c file to see how it works. CuTestTest.c
+contains tests for CuTest.c. So CuTest tests itself.
+
+Since AllTests.c has a main() you will need to exclude this when
+you are building your product. Here is a nicer way to do this if
+you want to avoid messing with multiple builds. Remove the main()
+in AllTests.c. Note that it just calls RunAllTests(). Instead
+we'll call this directly from the main program.
+
+Now, in the main() of the actual program, check whether the
+command line option "--test" was passed. If it was, call
+RunAllTests() from AllTests.c. Otherwise run the real program.
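
A minimal sketch of such a main() (assuming RunAllTests() is
forward-declared, as in the AllTests.c example earlier):

    #include <string.h>

    void RunAllTests(void);

    int main(int argc, char* argv[]) {
        if (argc > 1 && strcmp(argv[1], "--test") == 0) {
            RunAllTests();    /* run the unit tests instead of the program */
            return 0;
        }
        /* ... run the real program here ... */
        return 0;
    }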
+
+Shipping the tests with the code can be useful. If your customers
+complain about a problem, you can ask them to run the unit tests
+and send you the output. This can help you to quickly isolate the
+piece of your system that is malfunctioning in the customer's
+environment.
+
+CuTest offers a rich set of CuAssert functions. Here is a list:
+
+void CuAssert(CuTest* tc, char* message, int condition);
+void CuAssertTrue(CuTest* tc, int condition);
+void CuAssertStrEquals(CuTest* tc, char* expected, char* actual);
+void CuAssertIntEquals(CuTest* tc, int expected, int actual);
+void CuAssertPtrEquals(CuTest* tc, void* expected, void* actual);
+void CuAssertPtrNotNull(CuTest* tc, void* pointer);
+
+The project is open source and so you can add other more powerful
+asserts to make your tests easier to write and more concise.
+
+
+AUTOMATING TEST SUITE GENERATION
+
+make-tests.sh will grep through all the .c files in the current
+directory and generate the code to run all the tests contained in
+them. Using this script you don't have to worry about writing
+AllTests.c or dealing with any of the other suite code.
+
+
+CREDITS
+
+[02.23.2003] Dave Glowacki has added
+(1) file name and line numbers to the error messages, (2)
+AssertDblEquals for doubles, (3) Assert<X>Equals_Msg version of
+all the Assert<X>Equals to pass in an optional message which is
+printed out on assert failure.
diff --git a/test/CuTest.c b/test/CuTest.c
new file mode 100644
index 0000000..315f59f
--- /dev/null
+++ b/test/CuTest.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2003 Asim Jalis
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any
+ * damages arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any
+ * purpose, including commercial applications, and to alter it and
+ * redistribute it freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you
+ * must not claim that you wrote the original software. If you use
+ * this software in a product, an acknowledgment in the product
+ * documentation would be appreciated but is not required.
+ *
+ * 2. Altered source versions must be plainly marked as such, and
+ * must not be misrepresented as being the original software.
+ *
+ * 3. This notice may not be removed or altered from any source
+ * distribution.
+ *-------------------------------------------------------------------------*
+ *
+ * Originally obtained from "http://cutest.sourceforge.net/" version 1.4.
+ *
+ * Modified for serf as follows
+ * 1) added CuStringFree(), CuTestFree(), CuSuiteFree(), and
+ * CuSuiteFreeDeep()
+ * 0) reformatted the whitespace (doh!)
+ */
+#include <assert.h>
+#include <setjmp.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#include "CuTest.h"
+
+/*-------------------------------------------------------------------------*
+ * CuStr
+ *-------------------------------------------------------------------------*/
+
+char* CuStrAlloc(int size)
+{
+ char* newStr = (char*) malloc( sizeof(char) * (size) );
+ return newStr;
+}
+
+char* CuStrCopy(const char* old)
+{
+ int len = strlen(old);
+ char* newStr = CuStrAlloc(len + 1);
+ strcpy(newStr, old);
+ return newStr;
+}
+
+/*-------------------------------------------------------------------------*
+ * CuString
+ *-------------------------------------------------------------------------*/
+
+void CuStringInit(CuString* str)
+{
+ str->length = 0;
+ str->size = STRING_MAX;
+ str->buffer = (char*) malloc(sizeof(char) * str->size);
+ str->buffer[0] = '\0';
+}
+
+CuString* CuStringNew(void)
+{
+ CuString* str = (CuString*) malloc(sizeof(CuString));
+ str->length = 0;
+ str->size = STRING_MAX;
+ str->buffer = (char*) malloc(sizeof(char) * str->size);
+ str->buffer[0] = '\0';
+ return str;
+}
+
+void CuStringFree(CuString *str)
+{
+ free(str->buffer);
+ free(str);
+}
+
+void CuStringResize(CuString* str, int newSize)
+{
+ str->buffer = (char*) realloc(str->buffer, sizeof(char) * newSize);
+ str->size = newSize;
+}
+
+void CuStringAppend(CuString* str, const char* text)
+{
+ int length;
+
+ if (text == NULL) {
+ text = "NULL";
+ }
+
+ length = strlen(text);
+ if (str->length + length + 1 >= str->size)
+ CuStringResize(str, str->length + length + 1 + STRING_INC);
+ str->length += length;
+ strcat(str->buffer, text);
+}
+
+void CuStringAppendChar(CuString* str, char ch)
+{
+ char text[2];
+ text[0] = ch;
+ text[1] = '\0';
+ CuStringAppend(str, text);
+}
+
+void CuStringAppendFormat(CuString* str, const char* format, ...)
+{
+ va_list argp;
+ char buf[HUGE_STRING_LEN];
+ va_start(argp, format);
+ vsprintf(buf, format, argp);
+ va_end(argp);
+ CuStringAppend(str, buf);
+}
+
+void CuStringInsert(CuString* str, const char* text, int pos)
+{
+ int length = strlen(text);
+ if (pos > str->length)
+ pos = str->length;
+ if (str->length + length + 1 >= str->size)
+ CuStringResize(str, str->length + length + 1 + STRING_INC);
+ memmove(str->buffer + pos + length, str->buffer + pos, (str->length - pos) + 1);
+ str->length += length;
+ memcpy(str->buffer + pos, text, length);
+}
+
+/*-------------------------------------------------------------------------*
+ * CuTest
+ *-------------------------------------------------------------------------*/
+
+void CuTestInit(CuTest* t, const char* name, TestFunction function)
+{
+ t->name = CuStrCopy(name);
+ t->failed = 0;
+ t->ran = 0;
+ t->message = NULL;
+ t->function = function;
+ t->jumpBuf = NULL;
+}
+
+CuTest* CuTestNew(const char* name, TestFunction function)
+{
+ CuTest* tc = CU_ALLOC(CuTest);
+ CuTestInit(tc, name, function);
+ return tc;
+}
+
+void CuTestFree(CuTest* tc)
+{
+ free(tc->name);
+ free(tc);
+}
+
+void CuTestRun(CuTest* tc)
+{
+ jmp_buf buf;
+ tc->jumpBuf = &buf;
+ if (setjmp(buf) == 0)
+ {
+ tc->ran = 1;
+ (tc->function)(tc);
+ }
+ tc->jumpBuf = 0;
+}
+
+static void CuFailInternal(CuTest* tc, const char* file, int line, CuString* string)
+{
+ char buf[HUGE_STRING_LEN];
+
+ sprintf(buf, "%s:%d: ", file, line);
+ CuStringInsert(string, buf, 0);
+
+ tc->failed = 1;
+ tc->message = string->buffer;
+ if (tc->jumpBuf != 0) longjmp(*(tc->jumpBuf), 0);
+}
+
+void CuFail_Line(CuTest* tc, const char* file, int line, const char* message2, const char* message)
+{
+ CuString string;
+
+ CuStringInit(&string);
+ if (message2 != NULL)
+ {
+ CuStringAppend(&string, message2);
+ CuStringAppend(&string, ": ");
+ }
+ CuStringAppend(&string, message);
+ CuFailInternal(tc, file, line, &string);
+}
+
+void CuAssert_Line(CuTest* tc, const char* file, int line, const char* message, int condition)
+{
+ if (condition) return;
+ CuFail_Line(tc, file, line, NULL, message);
+}
+
+void CuAssertStrEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
+ const char* expected, const char* actual)
+{
+ CuString string;
+ if ((expected == NULL && actual == NULL) ||
+ (expected != NULL && actual != NULL &&
+ strcmp(expected, actual) == 0))
+ {
+ return;
+ }
+
+ CuStringInit(&string);
+ if (message != NULL)
+ {
+ CuStringAppend(&string, message);
+ CuStringAppend(&string, ": ");
+ }
+ CuStringAppend(&string, "expected <");
+ CuStringAppend(&string, expected);
+ CuStringAppend(&string, "> but was <");
+ CuStringAppend(&string, actual);
+ CuStringAppend(&string, ">");
+ CuFailInternal(tc, file, line, &string);
+}
+
+void CuAssertIntEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
+ int expected, int actual)
+{
+ char buf[STRING_MAX];
+ if (expected == actual) return;
+ sprintf(buf, "expected <%d> but was <%d>", expected, actual);
+ CuFail_Line(tc, file, line, message, buf);
+}
+
+void CuAssertDblEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
+ double expected, double actual, double delta)
+{
+ char buf[STRING_MAX];
+ if (fabs(expected - actual) <= delta) return;
+ sprintf(buf, "expected <%lf> but was <%lf>", expected, actual);
+ CuFail_Line(tc, file, line, message, buf);
+}
+
+void CuAssertPtrEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
+ void* expected, void* actual)
+{
+ char buf[STRING_MAX];
+ if (expected == actual) return;
+ sprintf(buf, "expected pointer <0x%p> but was <0x%p>", expected, actual);
+ CuFail_Line(tc, file, line, message, buf);
+}
+
+
+/*-------------------------------------------------------------------------*
+ * CuSuite
+ *-------------------------------------------------------------------------*/
+
+void CuSuiteInit(CuSuite* testSuite)
+{
+ testSuite->count = 0;
+ testSuite->failCount = 0;
+}
+
+CuSuite* CuSuiteNew(void)
+{
+ CuSuite* testSuite = CU_ALLOC(CuSuite);
+ CuSuiteInit(testSuite);
+ return testSuite;
+}
+
+void CuSuiteFree(CuSuite *testSuite)
+{
+ free(testSuite);
+}
+
+void CuSuiteFreeDeep(CuSuite *testSuite)
+{
+ int i;
+ for (i = 0 ; i < testSuite->count ; ++i)
+ {
+ CuTest* testCase = testSuite->list[i];
+ CuTestFree(testCase);
+ }
+ free(testSuite);
+}
+
+void CuSuiteAdd(CuSuite* testSuite, CuTest *testCase)
+{
+ assert(testSuite->count < MAX_TEST_CASES);
+ testSuite->list[testSuite->count] = testCase;
+ testSuite->count++;
+}
+
+void CuSuiteAddSuite(CuSuite* testSuite, CuSuite* testSuite2)
+{
+ int i;
+ for (i = 0 ; i < testSuite2->count ; ++i)
+ {
+ CuTest* testCase = testSuite2->list[i];
+ CuSuiteAdd(testSuite, testCase);
+ }
+}
+
+void CuSuiteRun(CuSuite* testSuite)
+{
+ int i;
+ for (i = 0 ; i < testSuite->count ; ++i)
+ {
+ CuTest* testCase = testSuite->list[i];
+ CuTestRun(testCase);
+ if (testCase->failed) { testSuite->failCount += 1; }
+ }
+}
+
+void CuSuiteSummary(CuSuite* testSuite, CuString* summary)
+{
+ int i;
+ for (i = 0 ; i < testSuite->count ; ++i)
+ {
+ CuTest* testCase = testSuite->list[i];
+ CuStringAppend(summary, testCase->failed ? "F" : ".");
+ }
+ CuStringAppend(summary, "\n\n");
+}
+
+void CuSuiteDetails(CuSuite* testSuite, CuString* details)
+{
+ int i;
+ int failCount = 0;
+
+ if (testSuite->failCount == 0)
+ {
+ int passCount = testSuite->count - testSuite->failCount;
+ const char* testWord = passCount == 1 ? "test" : "tests";
+ CuStringAppendFormat(details, "OK (%d %s)\n", passCount, testWord);
+ }
+ else
+ {
+ if (testSuite->failCount == 1)
+ CuStringAppend(details, "There was 1 failure:\n");
+ else
+ CuStringAppendFormat(details, "There were %d failures:\n", testSuite->failCount);
+
+ for (i = 0 ; i < testSuite->count ; ++i)
+ {
+ CuTest* testCase = testSuite->list[i];
+ if (testCase->failed)
+ {
+ failCount++;
+ CuStringAppendFormat(details, "%d) %s: %s\n",
+ failCount, testCase->name, testCase->message);
+ }
+ }
+ CuStringAppend(details, "\n!!!FAILURES!!!\n");
+
+ CuStringAppendFormat(details, "Runs: %d ", testSuite->count);
+ CuStringAppendFormat(details, "Passes: %d ", testSuite->count - testSuite->failCount);
+ CuStringAppendFormat(details, "Fails: %d\n", testSuite->failCount);
+ }
+}
diff --git a/test/CuTest.h b/test/CuTest.h
new file mode 100644
index 0000000..1895bec
--- /dev/null
+++ b/test/CuTest.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2003 Asim Jalis
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any
+ * damages arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any
+ * purpose, including commercial applications, and to alter it and
+ * redistribute it freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you
+ * must not claim that you wrote the original software. If you use
+ * this software in a product, an acknowledgment in the product
+ * documentation would be appreciated but is not required.
+ *
+ * 2. Altered source versions must be plainly marked as such, and
+ * must not be misrepresented as being the original software.
+ *
+ * 3. This notice may not be removed or altered from any source
+ * distribution.
+ *-------------------------------------------------------------------------*
+ *
+ * Originally obtained from "http://cutest.sourceforge.net/" version 1.4.
+ *
+ * Modified for serf as follows
+ * 2) removed const from struct CuTest.name
+ * 1) added CuStringFree(), CuTestFree(), CuSuiteFree(), and
+ * CuSuiteFreeDeep()
+ * 0) reformatted the whitespace (doh!)
+ */
+#ifndef CU_TEST_H
+#define CU_TEST_H
+
+#include <setjmp.h>
+#include <stdarg.h>
+
+/* CuString */
+
+char* CuStrAlloc(int size);
+char* CuStrCopy(const char* old);
+
+#define CU_ALLOC(TYPE) ((TYPE*) malloc(sizeof(TYPE)))
+
+#define HUGE_STRING_LEN 8192
+#define STRING_MAX 256
+#define STRING_INC 256
+
+typedef struct
+{
+ int length;
+ int size;
+ char* buffer;
+} CuString;
+
+void CuStringInit(CuString* str);
+CuString* CuStringNew(void);
+void CuStringFree(CuString *str);
+void CuStringRead(CuString* str, const char* path);
+void CuStringAppend(CuString* str, const char* text);
+void CuStringAppendChar(CuString* str, char ch);
+void CuStringAppendFormat(CuString* str, const char* format, ...);
+void CuStringInsert(CuString* str, const char* text, int pos);
+void CuStringResize(CuString* str, int newSize);
+
+/* CuTest */
+
+typedef struct CuTest CuTest;
+
+typedef void (*TestFunction)(CuTest *);
+
+struct CuTest
+{
+ char* name;
+ TestFunction function;
+ int failed;
+ int ran;
+ const char* message;
+ jmp_buf *jumpBuf;
+};
+
+void CuTestInit(CuTest* t, const char* name, TestFunction function);
+CuTest* CuTestNew(const char* name, TestFunction function);
+void CuTestFree(CuTest* tc);
+void CuTestRun(CuTest* tc);
+
+/* Internal versions of assert functions -- use the public versions */
+void CuFail_Line(CuTest* tc, const char* file, int line, const char* message2, const char* message);
+void CuAssert_Line(CuTest* tc, const char* file, int line, const char* message, int condition);
+void CuAssertStrEquals_LineMsg(CuTest* tc,
+ const char* file, int line, const char* message,
+ const char* expected, const char* actual);
+void CuAssertIntEquals_LineMsg(CuTest* tc,
+ const char* file, int line, const char* message,
+ int expected, int actual);
+void CuAssertDblEquals_LineMsg(CuTest* tc,
+ const char* file, int line, const char* message,
+ double expected, double actual, double delta);
+void CuAssertPtrEquals_LineMsg(CuTest* tc,
+ const char* file, int line, const char* message,
+ void* expected, void* actual);
+
+/* public assert functions */
+
+#define CuFail(tc, ms) CuFail_Line( (tc), __FILE__, __LINE__, NULL, (ms))
+#define CuAssert(tc, ms, cond) CuAssert_Line((tc), __FILE__, __LINE__, (ms), (cond))
+#define CuAssertTrue(tc, cond) CuAssert_Line((tc), __FILE__, __LINE__, "assert failed", (cond))
+
+#define CuAssertStrEquals(tc,ex,ac) CuAssertStrEquals_LineMsg((tc),__FILE__,__LINE__,NULL,(ex),(ac))
+#define CuAssertStrEquals_Msg(tc,ms,ex,ac) CuAssertStrEquals_LineMsg((tc),__FILE__,__LINE__,(ms),(ex),(ac))
+#define CuAssertIntEquals(tc,ex,ac) CuAssertIntEquals_LineMsg((tc),__FILE__,__LINE__,NULL,(ex),(ac))
+#define CuAssertIntEquals_Msg(tc,ms,ex,ac) CuAssertIntEquals_LineMsg((tc),__FILE__,__LINE__,(ms),(ex),(ac))
+#define CuAssertDblEquals(tc,ex,ac,dl) CuAssertDblEquals_LineMsg((tc),__FILE__,__LINE__,NULL,(ex),(ac),(dl))
+#define CuAssertDblEquals_Msg(tc,ms,ex,ac,dl) CuAssertDblEquals_LineMsg((tc),__FILE__,__LINE__,(ms),(ex),(ac),(dl))
+#define CuAssertPtrEquals(tc,ex,ac) CuAssertPtrEquals_LineMsg((tc),__FILE__,__LINE__,NULL,(ex),(ac))
+#define CuAssertPtrEquals_Msg(tc,ms,ex,ac) CuAssertPtrEquals_LineMsg((tc),__FILE__,__LINE__,(ms),(ex),(ac))
+
+#define CuAssertPtrNotNull(tc,p) CuAssert_Line((tc),__FILE__,__LINE__,"null pointer unexpected",(p != NULL))
+#define CuAssertPtrNotNullMsg(tc,msg,p) CuAssert_Line((tc),__FILE__,__LINE__,(msg),(p != NULL))
+
+/* CuSuite */
+
+#define MAX_TEST_CASES 1024
+
+#define SUITE_ADD_TEST(SUITE,TEST) CuSuiteAdd(SUITE, CuTestNew(#TEST, TEST))
+
+typedef struct
+{
+ int count;
+ CuTest* list[MAX_TEST_CASES];
+ int failCount;
+
+} CuSuite;
+
+
+void CuSuiteInit(CuSuite* testSuite);
+CuSuite* CuSuiteNew(void);
+void CuSuiteFree(CuSuite *testSuite);
+void CuSuiteFreeDeep(CuSuite *testSuite);
+void CuSuiteAdd(CuSuite* testSuite, CuTest *testCase);
+void CuSuiteAddSuite(CuSuite* testSuite, CuSuite* testSuite2);
+void CuSuiteRun(CuSuite* testSuite);
+void CuSuiteSummary(CuSuite* testSuite, CuString* summary);
+void CuSuiteDetails(CuSuite* testSuite, CuString* details);
+
+#endif /* CU_TEST_H */
diff --git a/test/serf_bwtp.c b/test/serf_bwtp.c
new file mode 100644
index 0000000..0e73a2d
--- /dev/null
+++ b/test/serf_bwtp.c
@@ -0,0 +1,635 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr.h>
+#include <apr_uri.h>
+#include <apr_strings.h>
+#include <apr_atomic.h>
+#include <apr_base64.h>
+#include <apr_getopt.h>
+#include <apr_version.h>
+
+#include "serf.h"
+
+typedef struct {
+ int count;
+ int using_ssl;
+ serf_ssl_context_t *ssl_ctx;
+ serf_bucket_alloc_t *bkt_alloc;
+} app_baton_t;
+
+typedef struct {
+#if APR_MAJOR_VERSION > 0
+ apr_uint32_t requests_outstanding;
+#else
+ apr_atomic_t requests_outstanding;
+#endif
+ int print_headers;
+
+ serf_response_acceptor_t acceptor;
+ app_baton_t *acceptor_baton;
+
+ serf_response_handler_t handler;
+
+ const char *host;
+ const char *method;
+ const char *path;
+ const char *req_body_path;
+ const char *authn;
+} handler_baton_t;
+
+/* Kludges for APR 0.9 support. */
+#if APR_MAJOR_VERSION == 0
+#define apr_atomic_inc32 apr_atomic_inc
+#define apr_atomic_dec32 apr_atomic_dec
+#define apr_atomic_read32 apr_atomic_read
+#endif
+
+static void closed_connection(serf_connection_t *conn,
+ void *closed_baton,
+ apr_status_t why,
+ apr_pool_t *pool)
+{
+ if (why) {
+ abort();
+ }
+}
+
+static apr_status_t ignore_all_cert_errors(void *data, int failures,
+ const serf_ssl_certificate_t *cert)
+{
+    /* In a real application, you would normally not want to do this */
+ return APR_SUCCESS;
+}
+
+static apr_status_t conn_setup(apr_socket_t *skt,
+ serf_bucket_t **input_bkt,
+ serf_bucket_t **output_bkt,
+ void *setup_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ app_baton_t *ctx = setup_baton;
+
+ c = serf_bucket_socket_create(skt, ctx->bkt_alloc);
+ if (ctx->using_ssl) {
+ c = serf_bucket_ssl_decrypt_create(c, ctx->ssl_ctx, ctx->bkt_alloc);
+ if (!ctx->ssl_ctx) {
+ ctx->ssl_ctx = serf_bucket_ssl_decrypt_context_get(c);
+ }
+ serf_ssl_server_cert_callback_set(ctx->ssl_ctx, ignore_all_cert_errors, NULL);
+
+ *output_bkt = serf_bucket_ssl_encrypt_create(*output_bkt, ctx->ssl_ctx,
+ ctx->bkt_alloc);
+ }
+
+ *input_bkt = c;
+
+ return APR_SUCCESS;
+}
+
+static serf_bucket_t* accept_response(serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ serf_bucket_alloc_t *bkt_alloc;
+
+ /* get the per-request bucket allocator */
+ bkt_alloc = serf_request_get_alloc(request);
+
+ /* Create a barrier so the response doesn't eat us! */
+ c = serf_bucket_barrier_create(stream, bkt_alloc);
+
+ return serf_bucket_response_create(c, bkt_alloc);
+}
+
+static serf_bucket_t* accept_bwtp(serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ app_baton_t *app_ctx = acceptor_baton;
+
+ /* Create a barrier so the response doesn't eat us! */
+ c = serf_bucket_barrier_create(stream, app_ctx->bkt_alloc);
+
+ return serf_bucket_bwtp_incoming_frame_create(c, app_ctx->bkt_alloc);
+}
+
+/* fwd declare */
+static apr_status_t handle_bwtp_upgrade(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool);
+
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = setup_baton;
+ serf_bucket_t *hdrs_bkt;
+ serf_bucket_t *body_bkt;
+
+ if (ctx->req_body_path) {
+ apr_file_t *file;
+ apr_status_t status;
+
+ status = apr_file_open(&file, ctx->req_body_path, APR_READ,
+ APR_OS_DEFAULT, pool);
+
+ if (status) {
+ printf("Error opening file (%s)\n", ctx->req_body_path);
+ return status;
+ }
+
+ body_bkt = serf_bucket_file_create(file,
+ serf_request_get_alloc(request));
+ }
+ else {
+ body_bkt = NULL;
+ }
+
+ /*
+ *req_bkt = serf_bucket_bwtp_message_create(0, body_bkt,
+ serf_request_get_alloc(request));
+ */
+
+ *req_bkt = serf_bucket_bwtp_header_create(0, "MESSAGE",
+ serf_request_get_alloc(request));
+
+ hdrs_bkt = serf_bucket_bwtp_frame_get_headers(*req_bkt);
+
+ /* FIXME: Shouldn't we be able to figure out the host ourselves? */
+ serf_bucket_headers_setn(hdrs_bkt, "Host", ctx->host);
+ serf_bucket_headers_setn(hdrs_bkt, "User-Agent",
+ "Serf/" SERF_VERSION_STRING);
+ /* Shouldn't serf do this for us? */
+ serf_bucket_headers_setn(hdrs_bkt, "Accept-Encoding", "gzip");
+
+ if (ctx->authn != NULL) {
+ serf_bucket_headers_setn(hdrs_bkt, "Authorization", ctx->authn);
+ }
+
+ *acceptor = ctx->acceptor;
+ *acceptor_baton = ctx->acceptor_baton;
+ *handler = ctx->handler;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t setup_bwtp_upgrade(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *hdrs_bkt;
+ handler_baton_t *ctx = setup_baton;
+
+ *req_bkt = serf_bucket_request_create("OPTIONS", "*", NULL,
+ serf_request_get_alloc(request));
+
+ hdrs_bkt = serf_bucket_request_get_headers(*req_bkt);
+
+ serf_bucket_headers_setn(hdrs_bkt, "Upgrade", "BWTP/1.0");
+ serf_bucket_headers_setn(hdrs_bkt, "Connection", "Upgrade");
+
+ *acceptor = ctx->acceptor;
+ *acceptor_baton = ctx->acceptor_baton;
+ *handler = handle_bwtp_upgrade;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t setup_channel(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = setup_baton;
+ serf_bucket_t *hdrs_bkt;
+
+ *req_bkt = serf_bucket_bwtp_channel_open(0, ctx->path,
+ serf_request_get_alloc(request));
+
+ hdrs_bkt = serf_bucket_bwtp_frame_get_headers(*req_bkt);
+
+ /* FIXME: Shouldn't we be able to figure out the host ourselves? */
+ serf_bucket_headers_setn(hdrs_bkt, "Host", ctx->host);
+ serf_bucket_headers_setn(hdrs_bkt, "User-Agent",
+ "Serf/" SERF_VERSION_STRING);
+ /* Shouldn't serf do this for us? */
+ serf_bucket_headers_setn(hdrs_bkt, "Accept-Encoding", "gzip");
+
+ if (ctx->authn != NULL) {
+ serf_bucket_headers_setn(hdrs_bkt, "Authorization", ctx->authn);
+ }
+
+ *acceptor = ctx->acceptor;
+ *acceptor_baton = ctx->acceptor_baton;
+ *handler = ctx->handler;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t setup_close(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = setup_baton;
+
+ *req_bkt = serf_bucket_bwtp_channel_close(0, serf_request_get_alloc(request));
+
+ *acceptor = ctx->acceptor;
+ *acceptor_baton = ctx->acceptor_baton;
+ *handler = ctx->handler;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t handle_bwtp(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len;
+ apr_status_t status;
+
+ if (!response) {
+ /* A NULL response can come back if the request failed completely */
+ return APR_EGENERAL;
+ }
+ status = serf_bucket_bwtp_incoming_frame_wait_for_headers(response);
+ if (SERF_BUCKET_READ_ERROR(status) || APR_STATUS_IS_EAGAIN(status)) {
+ return status;
+ }
+ printf("BWTP %p frame: %d %d %s\n",
+ response, serf_bucket_bwtp_frame_get_channel(response),
+ serf_bucket_bwtp_frame_get_type(response),
+ serf_bucket_bwtp_frame_get_phrase(response));
+
+
+ while (1) {
+ status = serf_bucket_read(response, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* got some data. print it out. */
+ if (len) {
+ puts("BWTP body:\n---");
+ fwrite(data, 1, len, stdout);
+ puts("\n---");
+ }
+
+ /* are we done yet? */
+ if (APR_STATUS_IS_EOF(status)) {
+ return APR_EOF;
+ }
+
+ /* have we drained the response so far? */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return status;
+
+ /* loop to read some more. */
+ }
+ /* NOTREACHED */
+}
+
+static apr_status_t handle_bwtp_upgrade(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len;
+ serf_status_line sl;
+ apr_status_t status;
+ handler_baton_t *ctx = handler_baton;
+
+ if (!response) {
+ /* A NULL response can come back if the request failed completely */
+ return APR_EGENERAL;
+ }
+ status = serf_bucket_response_status(response, &sl);
+ if (status) {
+ return status;
+ }
+
+ while (1) {
+ status = serf_bucket_read(response, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* got some data. print it out. */
+ fwrite(data, 1, len, stdout);
+
+ /* are we done yet? */
+ if (APR_STATUS_IS_EOF(status)) {
+ int i;
+ serf_connection_t *conn;
+ serf_request_t *new_req;
+
+ conn = serf_request_get_conn(request);
+
+ serf_connection_set_async_responses(conn,
+ accept_bwtp, ctx->acceptor_baton, handle_bwtp, NULL);
+
+ new_req = serf_connection_request_create(conn, setup_channel, ctx);
+ for (i = 0; i < ctx->acceptor_baton->count; i++) {
+ new_req = serf_connection_request_create(conn,
+ setup_request,
+ ctx);
+ }
+
+ return APR_EOF;
+ }
+
+ /* have we drained the response so far? */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return status;
+
+ /* loop to read some more. */
+ }
+ /* NOTREACHED */
+}
+
+static apr_status_t handle_response(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len;
+ serf_status_line sl;
+ apr_status_t status;
+ handler_baton_t *ctx = handler_baton;
+
+ if (!response) {
+ /* A NULL response can come back if the request failed completely */
+ return APR_EGENERAL;
+ }
+ status = serf_bucket_response_status(response, &sl);
+ if (status) {
+ return status;
+ }
+
+ while (1) {
+ status = serf_bucket_read(response, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* got some data. print it out. */
+ fwrite(data, 1, len, stdout);
+
+ /* are we done yet? */
+ if (APR_STATUS_IS_EOF(status)) {
+ if (ctx->print_headers) {
+ serf_bucket_t *hdrs;
+ hdrs = serf_bucket_response_get_headers(response);
+ while (1) {
+ status = serf_bucket_read(hdrs, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ fwrite(data, 1, len, stdout);
+ if (APR_STATUS_IS_EOF(status)) {
+ break;
+ }
+ }
+ }
+
+ apr_atomic_dec32(&ctx->requests_outstanding);
+ if (!ctx->requests_outstanding) {
+ serf_connection_t *conn;
+ serf_request_t *new_req;
+
+ conn = serf_request_get_conn(request);
+ new_req =
+ serf_connection_request_create(conn, setup_close, ctx);
+ }
+ return APR_EOF;
+ }
+
+ /* have we drained the response so far? */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return status;
+
+ /* loop to read some more. */
+ }
+ /* NOTREACHED */
+}
+
+static void print_usage(apr_pool_t *pool)
+{
+    puts("serf_bwtp [options] URL");
+ puts("-h\tDisplay this help");
+ puts("-v\tDisplay version");
+ puts("-H\tPrint response headers");
+ puts("-n <count> Fetch URL <count> times");
+ puts("-a <user:password> Present Basic authentication credentials");
+ puts("-m <method> Use the <method> HTTP Method");
+ puts("-f <file> Use the <file> as the request body");
+}
+
+int main(int argc, const char **argv)
+{
+ apr_status_t status;
+ apr_pool_t *pool;
+ apr_sockaddr_t *address;
+ serf_context_t *context;
+ serf_connection_t *connection;
+ serf_request_t *request;
+ app_baton_t app_ctx;
+ handler_baton_t handler_ctx;
+ apr_uri_t url;
+ const char *raw_url, *method, *req_body_path = NULL;
+ int i;
+ int print_headers;
+ char *authn = NULL;
+ apr_getopt_t *opt;
+ char opt_c;
+ const char *opt_arg;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ apr_pool_create(&pool, NULL);
+ /* serf_initialize(); */
+
+ /* Default to one round of fetching. */
+ app_ctx.count = 1;
+ /* Default to GET. */
+ method = "GET";
+ /* Do not print headers by default. */
+ print_headers = 0;
+
+ apr_getopt_init(&opt, pool, argc, argv);
+
+ while ((status = apr_getopt(opt, "a:f:hHm:n:v", &opt_c, &opt_arg)) ==
+ APR_SUCCESS) {
+ int srclen, enclen;
+
+ switch (opt_c) {
+ case 'a':
+ srclen = strlen(opt_arg);
+ enclen = apr_base64_encode_len(srclen);
+ authn = apr_palloc(pool, enclen + 6);
+ strcpy(authn, "Basic ");
+ (void) apr_base64_encode(&authn[6], opt_arg, srclen);
+ break;
+ case 'f':
+ req_body_path = opt_arg;
+ break;
+ case 'h':
+ print_usage(pool);
+ exit(0);
+ break;
+ case 'H':
+ print_headers = 1;
+ break;
+ case 'm':
+ method = opt_arg;
+ break;
+ case 'n':
+ errno = 0;
+ app_ctx.count = apr_strtoi64(opt_arg, NULL, 10);
+ if (errno) {
+ printf("Problem converting number of times to fetch URL (%d)\n",
+ errno);
+ return errno;
+ }
+ break;
+ case 'v':
+ puts("Serf version: " SERF_VERSION_STRING);
+ exit(0);
+ default:
+ break;
+ }
+ }
+
+ if (opt->ind != opt->argc - 1) {
+ print_usage(pool);
+ exit(-1);
+ }
+
+ raw_url = argv[opt->ind];
+
+ apr_uri_parse(pool, raw_url, &url);
+ if (!url.port) {
+ url.port = apr_uri_port_of_scheme(url.scheme);
+ }
+ if (!url.path) {
+ url.path = "/";
+ }
+
+ if (strcasecmp(url.scheme, "https") == 0) {
+ app_ctx.using_ssl = 1;
+ }
+ else {
+ app_ctx.using_ssl = 0;
+ }
+
+ status = apr_sockaddr_info_get(&address,
+ url.hostname, APR_UNSPEC, url.port, 0,
+ pool);
+ if (status) {
+ printf("Error creating address: %d\n", status);
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+
+ context = serf_context_create(pool);
+
+ /* ### Connection or Context should have an allocator? */
+ app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
+ app_ctx.ssl_ctx = NULL;
+
+ connection = serf_connection_create(context, address,
+ conn_setup, &app_ctx,
+ closed_connection, &app_ctx,
+ pool);
+
+ handler_ctx.requests_outstanding = 0;
+ handler_ctx.print_headers = print_headers;
+
+ handler_ctx.host = url.hostinfo;
+ handler_ctx.method = method;
+ handler_ctx.path = url.path;
+ handler_ctx.authn = authn;
+
+ handler_ctx.req_body_path = req_body_path;
+
+ handler_ctx.acceptor = accept_response;
+ handler_ctx.acceptor_baton = &app_ctx;
+ handler_ctx.handler = handle_response;
+
+ request = serf_connection_request_create(connection, setup_bwtp_upgrade,
+ &handler_ctx);
+
+ for (i = 0; i < app_ctx.count; i++) {
+ apr_atomic_inc32(&handler_ctx.requests_outstanding);
+ }
+
+ while (1) {
+ status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ continue;
+ if (status) {
+ char buf[200];
+
+ printf("Error running context: (%d) %s\n", status,
+ apr_strerror(status, buf, sizeof(buf)));
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+ if (!apr_atomic_read32(&handler_ctx.requests_outstanding)) {
+ break;
+ }
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(app_ctx.bkt_alloc);
+ }
+
+ serf_connection_close(connection);
+
+ apr_pool_destroy(pool);
+ return 0;
+}
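
The main() above, like the other clients in this patch, drives all network activity through the same serf_context_run() loop: keep pumping on APR_TIMEUP, report and bail out on a hard error, and stop once no requests remain outstanding. Below is a minimal sketch of that pattern factored into a helper, using only calls that already appear in this patch; the name run_until_done and its exact signature are illustrative, not part of serf.

    #include <stdio.h>

    #include <apr.h>
    #include <apr_atomic.h>

    #include "serf.h"

    /* Illustrative helper: pump the serf context until a hard error occurs
     * or the caller's outstanding-request counter drops to zero. */
    static apr_status_t run_until_done(serf_context_t *context,
                                       apr_uint32_t *requests_outstanding,
                                       apr_pool_t *pool)
    {
        apr_status_t status;

        while (1) {
            status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
            if (APR_STATUS_IS_TIMEUP(status))
                continue;                /* nothing ready yet; keep waiting */
            if (status) {
                char buf[200];

                printf("Error running context: (%d) %s\n", status,
                       apr_strerror(status, buf, sizeof(buf)));
                return status;           /* let the caller clean up the pool */
            }
            if (!apr_atomic_read32(requests_outstanding))
                return APR_SUCCESS;      /* every request has been handled */
        }
        /* NOTREACHED */
    }

serf_get.c below follows the same shape, except that it counts completed requests up towards the requested total instead of counting outstanding ones down to zero.
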
diff --git a/test/serf_get.c b/test/serf_get.c
new file mode 100644
index 0000000..ac1bb8e
--- /dev/null
+++ b/test/serf_get.c
@@ -0,0 +1,503 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr.h>
+#include <apr_uri.h>
+#include <apr_strings.h>
+#include <apr_atomic.h>
+#include <apr_base64.h>
+#include <apr_getopt.h>
+#include <apr_version.h>
+
+#include "serf.h"
+
+typedef struct {
+ const char *hostinfo;
+ int using_ssl;
+ serf_ssl_context_t *ssl_ctx;
+ serf_bucket_alloc_t *bkt_alloc;
+} app_baton_t;
+
+static void closed_connection(serf_connection_t *conn,
+ void *closed_baton,
+ apr_status_t why,
+ apr_pool_t *pool)
+{
+ if (why) {
+ abort();
+ }
+}
+
+static void print_ssl_cert_errors(int failures)
+{
+ if (failures) {
+ fprintf(stderr, "INVALID CERTIFICATE:\n");
+ if (failures & SERF_SSL_CERT_NOTYETVALID)
+ fprintf(stderr, "* The certificate is not yet valid.\n");
+ if (failures & SERF_SSL_CERT_EXPIRED)
+ fprintf(stderr, "* The certificate expired.\n");
+ if (failures & SERF_SSL_CERT_SELF_SIGNED)
+ fprintf(stderr, "* The certificate is self-signed.\n");
+ if (failures & SERF_SSL_CERT_UNKNOWNCA)
+ fprintf(stderr, "* The CA is unknown.\n");
+ if (failures & SERF_SSL_CERT_UNKNOWN_FAILURE)
+ fprintf(stderr, "* Unknown failure.\n");
+ }
+}
+
+static apr_status_t ignore_all_cert_errors(void *data, int failures,
+ const serf_ssl_certificate_t *cert)
+{
+ print_ssl_cert_errors(failures);
+
+    /* In a real application, you would normally not want to do this */
+ return APR_SUCCESS;
+}
+
+static apr_status_t print_certs(void *data, int failures, int error_depth,
+ const serf_ssl_certificate_t * const * certs,
+ apr_size_t certs_len)
+{
+ apr_pool_t *pool;
+ const serf_ssl_certificate_t *current;
+
+ apr_pool_create(&pool, NULL);
+
+ fprintf(stderr, "Received certificate chain with length %d\n",
+ (int)certs_len);
+ print_ssl_cert_errors(failures);
+ if (failures)
+ fprintf(stderr, "Error at depth=%d\n", error_depth);
+ else
+ fprintf(stderr, "Chain provided with depth=%d\n", error_depth);
+
+ while ((current = *certs) != NULL)
+ {
+ fprintf(stderr, "\n-----BEGIN CERTIFICATE-----\n");
+ fprintf(stderr, "%s\n", serf_ssl_cert_export(current, pool));
+ fprintf(stderr, "-----END CERTIFICATE-----\n");
+ ++certs;
+ }
+
+ apr_pool_destroy(pool);
+ return APR_SUCCESS;
+}
+
+static apr_status_t conn_setup(apr_socket_t *skt,
+ serf_bucket_t **input_bkt,
+ serf_bucket_t **output_bkt,
+ void *setup_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ app_baton_t *ctx = setup_baton;
+
+ c = serf_bucket_socket_create(skt, ctx->bkt_alloc);
+ if (ctx->using_ssl) {
+ c = serf_bucket_ssl_decrypt_create(c, ctx->ssl_ctx, ctx->bkt_alloc);
+ if (!ctx->ssl_ctx) {
+ ctx->ssl_ctx = serf_bucket_ssl_decrypt_context_get(c);
+ }
+ serf_ssl_server_cert_chain_callback_set(ctx->ssl_ctx,
+ ignore_all_cert_errors,
+ print_certs, NULL);
+ serf_ssl_set_hostname(ctx->ssl_ctx, ctx->hostinfo);
+
+ *output_bkt = serf_bucket_ssl_encrypt_create(*output_bkt, ctx->ssl_ctx,
+ ctx->bkt_alloc);
+ }
+
+ *input_bkt = c;
+
+ return APR_SUCCESS;
+}
+
+static serf_bucket_t* accept_response(serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ serf_bucket_alloc_t *bkt_alloc;
+
+ /* get the per-request bucket allocator */
+ bkt_alloc = serf_request_get_alloc(request);
+
+ /* Create a barrier so the response doesn't eat us! */
+ c = serf_bucket_barrier_create(stream, bkt_alloc);
+
+ return serf_bucket_response_create(c, bkt_alloc);
+}
+
+typedef struct {
+#if APR_MAJOR_VERSION > 0
+ apr_uint32_t completed_requests;
+#else
+ apr_atomic_t completed_requests;
+#endif
+ int print_headers;
+
+ serf_response_acceptor_t acceptor;
+ app_baton_t *acceptor_baton;
+
+ serf_response_handler_t handler;
+
+ const char *host;
+ const char *method;
+ const char *path;
+ const char *req_body_path;
+ const char *authn;
+} handler_baton_t;
+
+/* Kludges for APR 0.9 support. */
+#if APR_MAJOR_VERSION == 0
+#define apr_atomic_inc32 apr_atomic_inc
+#define apr_atomic_dec32 apr_atomic_dec
+#define apr_atomic_read32 apr_atomic_read
+#endif
+
+static apr_status_t handle_response(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len;
+ serf_status_line sl;
+ apr_status_t status;
+ handler_baton_t *ctx = handler_baton;
+
+ if (!response) {
+ /* A NULL response can come back if the request failed completely */
+ return APR_EGENERAL;
+ }
+ status = serf_bucket_response_status(response, &sl);
+ if (status) {
+ return status;
+ }
+
+ while (1) {
+ status = serf_bucket_read(response, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* got some data. print it out. */
+ fwrite(data, 1, len, stdout);
+
+ /* are we done yet? */
+ if (APR_STATUS_IS_EOF(status)) {
+ if (ctx->print_headers) {
+ serf_bucket_t *hdrs;
+ hdrs = serf_bucket_response_get_headers(response);
+ while (1) {
+ status = serf_bucket_read(hdrs, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ fwrite(data, 1, len, stdout);
+ if (APR_STATUS_IS_EOF(status)) {
+ break;
+ }
+ }
+ }
+
+ apr_atomic_inc32(&ctx->completed_requests);
+ return APR_EOF;
+ }
+
+ /* have we drained the response so far? */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return status;
+
+ /* loop to read some more. */
+ }
+ /* NOTREACHED */
+}
+
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = setup_baton;
+ serf_bucket_t *hdrs_bkt;
+ serf_bucket_t *body_bkt;
+
+ if (ctx->req_body_path) {
+ apr_file_t *file;
+ apr_status_t status;
+
+ status = apr_file_open(&file, ctx->req_body_path, APR_READ,
+ APR_OS_DEFAULT, pool);
+
+ if (status) {
+ printf("Error opening file (%s)\n", ctx->req_body_path);
+ return status;
+ }
+
+ body_bkt = serf_bucket_file_create(file,
+ serf_request_get_alloc(request));
+ }
+ else {
+ body_bkt = NULL;
+ }
+
+ *req_bkt = serf_request_bucket_request_create(request, ctx->method,
+ ctx->path, body_bkt,
+ serf_request_get_alloc(request));
+
+ hdrs_bkt = serf_bucket_request_get_headers(*req_bkt);
+
+ serf_bucket_headers_setn(hdrs_bkt, "User-Agent",
+ "Serf/" SERF_VERSION_STRING);
+ /* Shouldn't serf do this for us? */
+ serf_bucket_headers_setn(hdrs_bkt, "Accept-Encoding", "gzip");
+
+ if (ctx->authn != NULL) {
+ serf_bucket_headers_setn(hdrs_bkt, "Authorization", ctx->authn);
+ }
+
+ *acceptor = ctx->acceptor;
+ *acceptor_baton = ctx->acceptor_baton;
+ *handler = ctx->handler;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static void print_usage(apr_pool_t *pool)
+{
+ puts("serf_get [options] URL");
+ puts("-h\tDisplay this help");
+ puts("-v\tDisplay version");
+ puts("-H\tPrint response headers");
+ puts("-n <count> Fetch URL <count> times");
+ puts("-a <user:password> Present Basic authentication credentials");
+ puts("-m <method> Use the <method> HTTP Method");
+ puts("-f <file> Use the <file> as the request body");
+    puts("-p <host:port> Use <host:port> as the proxy server");
+}
+
+int main(int argc, const char **argv)
+{
+ apr_status_t status;
+ apr_pool_t *pool;
+ serf_context_t *context;
+ serf_connection_t *connection;
+ serf_request_t *request;
+ app_baton_t app_ctx;
+ handler_baton_t handler_ctx;
+ apr_uri_t url;
+ const char *proxy = NULL;
+ const char *raw_url, *method, *req_body_path = NULL;
+ int count;
+ int i;
+ int print_headers;
+ char *authn = NULL;
+ apr_getopt_t *opt;
+ char opt_c;
+ const char *opt_arg;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ apr_pool_create(&pool, NULL);
+ /* serf_initialize(); */
+
+ /* Default to one round of fetching. */
+ count = 1;
+ /* Default to GET. */
+ method = "GET";
+ /* Do not print headers by default. */
+ print_headers = 0;
+
+ apr_getopt_init(&opt, pool, argc, argv);
+
+ while ((status = apr_getopt(opt, "a:f:hHm:n:vp:", &opt_c, &opt_arg)) ==
+ APR_SUCCESS) {
+ int srclen, enclen;
+
+ switch (opt_c) {
+ case 'a':
+ srclen = strlen(opt_arg);
+ enclen = apr_base64_encode_len(srclen);
+ authn = apr_palloc(pool, enclen + 6);
+ strcpy(authn, "Basic ");
+ (void) apr_base64_encode(&authn[6], opt_arg, srclen);
+ break;
+ case 'f':
+ req_body_path = opt_arg;
+ break;
+ case 'h':
+ print_usage(pool);
+ exit(0);
+ break;
+ case 'H':
+ print_headers = 1;
+ break;
+ case 'm':
+ method = opt_arg;
+ break;
+ case 'n':
+ errno = 0;
+ count = apr_strtoi64(opt_arg, NULL, 10);
+ if (errno) {
+ printf("Problem converting number of times to fetch URL (%d)\n",
+ errno);
+ return errno;
+ }
+ break;
+ case 'p':
+ proxy = opt_arg;
+ break;
+ case 'v':
+ puts("Serf version: " SERF_VERSION_STRING);
+ exit(0);
+ default:
+ break;
+ }
+ }
+
+ if (opt->ind != opt->argc - 1) {
+ print_usage(pool);
+ exit(-1);
+ }
+
+ raw_url = argv[opt->ind];
+
+ apr_uri_parse(pool, raw_url, &url);
+ if (!url.port) {
+ url.port = apr_uri_port_of_scheme(url.scheme);
+ }
+ if (!url.path) {
+ url.path = "/";
+ }
+
+ if (strcasecmp(url.scheme, "https") == 0) {
+ app_ctx.using_ssl = 1;
+ }
+ else {
+ app_ctx.using_ssl = 0;
+ }
+
+ app_ctx.hostinfo = url.hostinfo;
+
+ context = serf_context_create(pool);
+
+ if (proxy)
+ {
+ apr_sockaddr_t *proxy_address = NULL;
+ apr_port_t proxy_port;
+ char *proxy_host;
+ char *proxy_scope;
+
+ status = apr_parse_addr_port(&proxy_host, &proxy_scope, &proxy_port, proxy, pool);
+ if (status)
+ {
+ printf("Cannot parse proxy hostname/port: %d\n", status);
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+
+ if (!proxy_host)
+ {
+ printf("Proxy hostname must be specified\n");
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+
+ if (!proxy_port)
+ {
+ printf("Proxy port must be specified\n");
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+
+ status = apr_sockaddr_info_get(&proxy_address, proxy_host, APR_UNSPEC,
+ proxy_port, 0, pool);
+
+ if (status)
+ {
+ printf("Cannot resolve proxy address '%s': %d\n", proxy_host, status);
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+
+ serf_config_proxy(context, proxy_address);
+ }
+
+ /* ### Connection or Context should have an allocator? */
+ app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
+ app_ctx.ssl_ctx = NULL;
+
+ status = serf_connection_create2(&connection, context, url,
+ conn_setup, &app_ctx,
+ closed_connection, &app_ctx,
+ pool);
+ if (status) {
+ printf("Error creating connection: %d\n", status);
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+
+ handler_ctx.completed_requests = 0;
+ handler_ctx.print_headers = print_headers;
+
+ handler_ctx.host = url.hostinfo;
+ handler_ctx.method = method;
+ handler_ctx.path = url.path;
+ handler_ctx.authn = authn;
+
+ handler_ctx.req_body_path = req_body_path;
+
+ handler_ctx.acceptor = accept_response;
+ handler_ctx.acceptor_baton = &app_ctx;
+ handler_ctx.handler = handle_response;
+
+ for (i = 0; i < count; i++) {
+ request = serf_connection_request_create(connection, setup_request,
+ &handler_ctx);
+ }
+
+ while (1) {
+ status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ continue;
+ if (status) {
+ char buf[200];
+
+ printf("Error running context: (%d) %s\n", status,
+ apr_strerror(status, buf, sizeof(buf)));
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+ if (apr_atomic_read32(&handler_ctx.completed_requests) >= count) {
+ break;
+ }
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(app_ctx.bkt_alloc);
+ }
+
+ serf_connection_close(connection);
+
+ apr_pool_destroy(pool);
+ return 0;
+}
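
The '-a' handling in main() above assembles the Authorization value by hand: base64-encode the user:password pair and prepend "Basic ". Below is a minimal sketch of that step as a standalone helper; make_basic_authn is a made-up name. Note that apr_base64_encode_len() already accounts for the terminating NUL, so enclen + 6 leaves exactly enough room for the six-character "Basic " prefix plus the encoded text.

    #include <string.h>

    #include <apr_pools.h>
    #include <apr_base64.h>

    /* Illustrative helper mirroring the '-a' option handling above. */
    static char *make_basic_authn(apr_pool_t *pool, const char *user_colon_pass)
    {
        int srclen = strlen(user_colon_pass);
        /* apr_base64_encode_len() includes the trailing NUL, so adding the
         * six bytes of "Basic " gives the full buffer size. */
        int enclen = apr_base64_encode_len(srclen);
        char *authn = apr_palloc(pool, enclen + 6);

        strcpy(authn, "Basic ");
        (void) apr_base64_encode(&authn[6], user_colon_pass, srclen);
        return authn;
    }

As the n suffix suggests, serf_bucket_headers_setn() stores the value without copying it, so the string has to outlive the requests that reference it; allocating it from the long-lived main pool, as the code above does, satisfies that.
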
diff --git a/test/serf_request.c b/test/serf_request.c
new file mode 100644
index 0000000..0bd6a11
--- /dev/null
+++ b/test/serf_request.c
@@ -0,0 +1,79 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include "serf.h"
+
+static apr_status_t drain_bucket(serf_bucket_t *bucket)
+{
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ while (1) {
+ status = serf_bucket_read(bucket, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /* got some data. print it out. */
+ fwrite(data, 1, len, stdout);
+
+ /* are we done yet? */
+ if (APR_STATUS_IS_EOF(status)) {
+ return APR_EOF;
+ }
+
+ /* have we drained the response so far? */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return APR_SUCCESS;
+
+ /* loop to read some more. */
+ }
+ /* NOTREACHED */
+}
+
+int main(int argc, const char **argv)
+{
+ apr_pool_t *pool;
+ serf_bucket_t *req_bkt;
+ serf_bucket_t *hdrs_bkt;
+ serf_bucket_alloc_t *allocator;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ apr_pool_create(&pool, NULL);
+ /* serf_initialize(); */
+
+ allocator = serf_bucket_allocator_create(pool, NULL, NULL);
+
+ req_bkt = serf_bucket_request_create("GET", "/", NULL, allocator);
+
+ hdrs_bkt = serf_bucket_request_get_headers(req_bkt);
+
+ /* FIXME: Shouldn't we be able to figure out the host ourselves? */
+ serf_bucket_headers_setn(hdrs_bkt, "Host", "localhost");
+ serf_bucket_headers_setn(hdrs_bkt, "User-Agent",
+ "Serf/" SERF_VERSION_STRING);
+
+ (void) drain_bucket(req_bkt);
+
+ serf_bucket_destroy(req_bkt);
+
+ apr_pool_destroy(pool);
+
+ return 0;
+}
diff --git a/test/serf_response.c b/test/serf_response.c
new file mode 100644
index 0000000..a8d7d85
--- /dev/null
+++ b/test/serf_response.c
@@ -0,0 +1,161 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr.h>
+#include <apr_uri.h>
+#include <apr_strings.h>
+#include <apr_atomic.h>
+#include <apr_version.h>
+
+#include "serf.h"
+
+typedef struct {
+ const char *resp_file;
+ serf_bucket_t *bkt;
+} accept_baton_t;
+
+static serf_bucket_t* accept_response(void *acceptor_baton,
+ serf_bucket_alloc_t *bkt_alloc,
+ apr_pool_t *pool)
+{
+ accept_baton_t *ctx = acceptor_baton;
+ serf_bucket_t *c;
+ apr_file_t *file;
+ apr_status_t status;
+
+ status = apr_file_open(&file, ctx->resp_file,
+ APR_READ, APR_OS_DEFAULT, pool);
+ if (status) {
+ return NULL;
+ }
+
+ c = ctx->bkt = serf_bucket_file_create(file, bkt_alloc);
+
+ c = serf_bucket_barrier_create(c, bkt_alloc);
+
+ return serf_bucket_response_create(c, bkt_alloc);
+}
+
+typedef struct {
+#if APR_MAJOR_VERSION > 0
+ apr_uint32_t requests_outstanding;
+#else
+ apr_atomic_t requests_outstanding;
+#endif
+} handler_baton_t;
+
+/* Kludges for APR 0.9 support. */
+#if APR_MAJOR_VERSION == 0
+#define apr_atomic_inc32 apr_atomic_inc
+#define apr_atomic_dec32 apr_atomic_dec
+#define apr_atomic_read32 apr_atomic_read
+#endif
+
+static apr_status_t handle_response(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ const char *data, *s;
+ apr_size_t len;
+ serf_status_line sl;
+ apr_status_t status;
+ handler_baton_t *ctx = handler_baton;
+
+ status = serf_bucket_response_status(response, &sl);
+ if (status) {
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ return APR_SUCCESS;
+ }
+ abort();
+ }
+
+ status = serf_bucket_read(response, 2048, &data, &len);
+
+ if (!status || APR_STATUS_IS_EOF(status)) {
+ if (len) {
+ s = apr_pstrmemdup(pool, data, len);
+ printf("%s", s);
+ }
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+ status = APR_SUCCESS;
+ }
+ if (APR_STATUS_IS_EOF(status)) {
+ serf_bucket_t *hdrs;
+ const char *v;
+
+ hdrs = serf_bucket_response_get_headers(response);
+ v = serf_bucket_headers_get(hdrs, "Trailer-Test");
+ if (v) {
+ printf("Trailer-Test: %s\n", v);
+ }
+
+ apr_atomic_dec32(&ctx->requests_outstanding);
+ }
+
+ return status;
+}
+
+int main(int argc, const char **argv)
+{
+ apr_status_t status;
+ apr_pool_t *pool;
+ serf_bucket_t *resp_bkt;
+ accept_baton_t accept_ctx;
+ handler_baton_t handler_ctx;
+ serf_bucket_alloc_t *allocator;
+
+ if (argc != 2) {
+ printf("%s: [Resp. File]\n", argv[0]);
+ exit(-1);
+ }
+ accept_ctx.resp_file = argv[1];
+ accept_ctx.bkt = NULL;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ apr_pool_create(&pool, NULL);
+ apr_atomic_init(pool);
+ /* serf_initialize(); */
+
+ allocator = serf_bucket_allocator_create(pool, NULL, NULL);
+
+ handler_ctx.requests_outstanding = 0;
+ apr_atomic_inc32(&handler_ctx.requests_outstanding);
+
+ resp_bkt = accept_response(&accept_ctx, allocator, pool);
+ while (1) {
+ status = handle_response(NULL, resp_bkt, &handler_ctx, pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ continue;
+ if (SERF_BUCKET_READ_ERROR(status)) {
+            printf("Error handling response: %d\n", status);
+ exit(1);
+ }
+ if (!apr_atomic_read32(&handler_ctx.requests_outstanding)) {
+ break;
+ }
+ }
+ serf_bucket_destroy(resp_bkt);
+ serf_bucket_destroy(accept_ctx.bkt);
+
+ apr_pool_destroy(pool);
+
+ return 0;
+}
diff --git a/test/serf_server.c b/test/serf_server.c
new file mode 100644
index 0000000..4d8a0c6
--- /dev/null
+++ b/test/serf_server.c
@@ -0,0 +1,147 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr.h>
+#include <apr_uri.h>
+#include <apr_strings.h>
+#include <apr_atomic.h>
+#include <apr_base64.h>
+#include <apr_getopt.h>
+#include <apr_version.h>
+
+#include "serf.h"
+
+typedef struct {
+ int foo;
+} app_baton_t;
+
+
+static apr_status_t incoming_request(serf_context_t *ctx,
+ serf_incoming_request_t *req,
+ void *request_baton,
+ apr_pool_t *pool)
+{
+ printf("INCOMING REQUEST\n");
+ return APR_SUCCESS;
+}
+
+
+static apr_status_t accept_fn(serf_context_t *ctx,
+ serf_listener_t *l,
+ void *baton,
+ apr_socket_t *insock,
+ apr_pool_t *pool)
+{
+ serf_incoming_t *client = NULL;
+    printf("new connection\n");
+ return serf_incoming_create(&client, ctx, insock, baton, incoming_request, pool);
+}
+
+static void print_usage(apr_pool_t *pool)
+{
+ puts("serf_server [options] listen_address:listen_port");
+ puts("-h\tDisplay this help");
+ puts("-v\tDisplay version");
+}
+
+int main(int argc, const char **argv)
+{
+ apr_status_t rv;
+ apr_pool_t *pool;
+ serf_context_t *context;
+ serf_listener_t *listener;
+ app_baton_t app_ctx;
+ const char *listen_spec;
+ char *addr = NULL;
+ char *scope_id = NULL;
+ apr_port_t port;
+ apr_getopt_t *opt;
+ char opt_c;
+ const char *opt_arg;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ apr_pool_create(&pool, NULL);
+
+ apr_getopt_init(&opt, pool, argc, argv);
+
+ while ((rv = apr_getopt(opt, "hv", &opt_c, &opt_arg)) ==
+ APR_SUCCESS) {
+ switch (opt_c) {
+ case 'h':
+ print_usage(pool);
+ exit(0);
+ break;
+ case 'v':
+ puts("Serf version: " SERF_VERSION_STRING);
+ exit(0);
+ default:
+ break;
+ }
+ }
+
+ if (opt->ind != opt->argc - 1) {
+ print_usage(pool);
+ exit(-1);
+ }
+
+ listen_spec = argv[opt->ind];
+
+
+ rv = apr_parse_addr_port(&addr, &scope_id, &port, listen_spec, pool);
+ if (rv) {
+ printf("Error parsing listen address: %d\n", rv);
+ exit(1);
+ }
+
+ if (!addr) {
+ addr = "0.0.0.0";
+ }
+
+ if (port == 0) {
+ port = 8080;
+ }
+
+ context = serf_context_create(pool);
+
+ /* TODO.... stuff */
+ app_ctx.foo = 1;
+ rv = serf_listener_create(&listener, context, addr, port,
+ &app_ctx, accept_fn, pool);
+ if (rv) {
+        printf("Error creating listener: %d\n", rv);
+ exit(1);
+ }
+
+ while (1) {
+ rv = serf_context_run(context, SERF_DURATION_FOREVER, pool);
+ if (APR_STATUS_IS_TIMEUP(rv))
+ continue;
+ if (rv) {
+ char buf[200];
+
+ printf("Error running context: (%d) %s\n", rv,
+ apr_strerror(rv, buf, sizeof(buf)));
+ apr_pool_destroy(pool);
+ exit(1);
+ }
+ }
+
+ apr_pool_destroy(pool);
+ return 0;
+}
diff --git a/test/serf_spider.c b/test/serf_spider.c
new file mode 100644
index 0000000..2945e98
--- /dev/null
+++ b/test/serf_spider.c
@@ -0,0 +1,826 @@
+/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr.h>
+#include <apr_uri.h>
+#include <apr_strings.h>
+#include <apr_atomic.h>
+#include <apr_base64.h>
+#include <apr_getopt.h>
+#include <apr_xml.h>
+#include <apr_thread_proc.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
+#include <apr_version.h>
+
+#include "serf.h"
+#include "serf_bucket_util.h"
+
+/*#define SERF_VERBOSE*/
+
+#if !APR_HAS_THREADS
+#error serf spider needs threads.
+#endif
+
+/* This is a rough-sketch example of how a multi-threaded spider could be
+ * constructed using serf.
+ *
+ * A network thread will read in a URL and feed it into an expat parser.
+ * After the entire response is read, the XML structure and the path are
+ * passed to a set of parser threads. These threads scan the document
+ * for HTML hrefs and queue up any links they find.
+ *
+ * It tries to stay on the same server, as it only uses one connection.
+ *
+ * Because we feed the responses into an XML parser, the documents must be
+ * well-formed XHTML.
+ *
+ * There is no duplicate link detection. You've been warned.
+ */
+
+/* The structure passed to the parser thread after we've read the entire
+ * response.
+ */
+typedef struct {
+ apr_xml_doc *doc;
+ char *path;
+ apr_pool_t *pool;
+} doc_path_t;
+
+typedef struct {
+ const char *authn;
+ int using_ssl;
+ serf_ssl_context_t *ssl_ctx;
+ serf_bucket_alloc_t *bkt_alloc;
+} app_baton_t;
+
+static void closed_connection(serf_connection_t *conn,
+ void *closed_baton,
+ apr_status_t why,
+ apr_pool_t *pool)
+{
+ if (why) {
+ abort();
+ }
+}
+
+static apr_status_t conn_setup(apr_socket_t *skt,
+ serf_bucket_t **input_bkt,
+ serf_bucket_t **output_bkt,
+ void *setup_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ app_baton_t *ctx = setup_baton;
+
+ c = serf_bucket_socket_create(skt, ctx->bkt_alloc);
+ if (ctx->using_ssl) {
+ c = serf_bucket_ssl_decrypt_create(c, ctx->ssl_ctx, ctx->bkt_alloc);
+ }
+
+ *input_bkt = c;
+
+ return APR_SUCCESS;
+}
+
+static serf_bucket_t* accept_response(serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ serf_bucket_alloc_t *bkt_alloc;
+
+ /* get the per-request bucket allocator */
+ bkt_alloc = serf_request_get_alloc(request);
+
+ /* Create a barrier so the response doesn't eat us! */
+ c = serf_bucket_barrier_create(stream, bkt_alloc);
+
+ return serf_bucket_response_create(c, bkt_alloc);
+}
+
+typedef struct {
+ serf_bucket_alloc_t *allocator;
+#if APR_MAJOR_VERSION > 0
+ apr_uint32_t *requests_outstanding;
+#else
+ apr_atomic_t *requests_outstanding;
+#endif
+ serf_bucket_alloc_t *doc_queue_alloc;
+ apr_array_header_t *doc_queue;
+ apr_thread_cond_t *doc_queue_condvar;
+
+ const char *hostinfo;
+
+ /* includes: path, query, fragment. */
+ char *full_path;
+ apr_size_t full_path_len;
+
+ char *path;
+ apr_size_t path_len;
+
+ char *query;
+ apr_size_t query_len;
+
+ char *fragment;
+ apr_size_t fragment_len;
+
+ apr_xml_parser *parser;
+ apr_pool_t *parser_pool;
+
+ int hdr_read;
+ int is_html;
+
+ serf_response_acceptor_t acceptor;
+ void *acceptor_baton;
+ serf_response_handler_t handler;
+
+ app_baton_t *app_ctx;
+} handler_baton_t;
+
+/* Kludges for APR 0.9 support. */
+#if APR_MAJOR_VERSION == 0
+#define apr_atomic_inc32 apr_atomic_inc
+#define apr_atomic_dec32 apr_atomic_dec
+#define apr_atomic_read32 apr_atomic_read
+#define apr_atomic_set32 apr_atomic_set
+#endif
+
+static apr_status_t handle_response(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ const char *data;
+ apr_size_t len;
+ serf_status_line sl;
+ apr_status_t status;
+ handler_baton_t *ctx = handler_baton;
+
+ if (!response) {
+ /* Oh no! We've been cancelled! */
+ abort();
+ }
+
+ status = serf_bucket_response_status(response, &sl);
+ if (status) {
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ return APR_SUCCESS;
+ }
+ abort();
+ }
+
+ while (1) {
+ status = serf_bucket_read(response, 2048, &data, &len);
+
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ /*fwrite(data, 1, len, stdout);*/
+
+ if (!ctx->hdr_read) {
+ serf_bucket_t *hdrs;
+ const char *val;
+
+ printf("Processing %s\n", ctx->path);
+
+ hdrs = serf_bucket_response_get_headers(response);
+ val = serf_bucket_headers_get(hdrs, "Content-Type");
+ /* FIXME: This check isn't quite right because Content-Type could
+ * be decorated; ideally strcasestr would be correct.
+ */
+ if (val && strcasecmp(val, "text/html") == 0) {
+ ctx->is_html = 1;
+ apr_pool_create(&ctx->parser_pool, NULL);
+ ctx->parser = apr_xml_parser_create(ctx->parser_pool);
+ }
+ else {
+ ctx->is_html = 0;
+ }
+ ctx->hdr_read = 1;
+ }
+ if (ctx->is_html) {
+ apr_status_t xs;
+
+ xs = apr_xml_parser_feed(ctx->parser, data, len);
+ /* Uh-oh. */
+ if (xs) {
+#ifdef SERF_VERBOSE
+ printf("XML parser error (feed): %d\n", xs);
+#endif
+ ctx->is_html = 0;
+ }
+ }
+
+ /* are we done yet? */
+ if (APR_STATUS_IS_EOF(status)) {
+
+ if (ctx->is_html) {
+ apr_xml_doc *xmld;
+ apr_status_t xs;
+ doc_path_t *dup;
+
+ xs = apr_xml_parser_done(ctx->parser, &xmld);
+ if (xs) {
+#ifdef SERF_VERBOSE
+ printf("XML parser error (done): %d\n", xs);
+#endif
+ return xs;
+ }
+ dup = (doc_path_t*)
+ serf_bucket_mem_alloc(ctx->doc_queue_alloc,
+ sizeof(doc_path_t));
+ dup->doc = xmld;
+ dup->path = (char*)serf_bucket_mem_alloc(ctx->doc_queue_alloc,
+ ctx->path_len);
+ memcpy(dup->path, ctx->path, ctx->path_len);
+ dup->pool = ctx->parser_pool;
+
+ *(doc_path_t **)apr_array_push(ctx->doc_queue) = dup;
+
+ apr_thread_cond_signal(ctx->doc_queue_condvar);
+ }
+
+ apr_atomic_dec32(ctx->requests_outstanding);
+ serf_bucket_mem_free(ctx->allocator, ctx->path);
+ if (ctx->query) {
+ serf_bucket_mem_free(ctx->allocator, ctx->query);
+ serf_bucket_mem_free(ctx->allocator, ctx->full_path);
+ }
+ if (ctx->fragment) {
+ serf_bucket_mem_free(ctx->allocator, ctx->fragment);
+ }
+ serf_bucket_mem_free(ctx->allocator, ctx);
+ return APR_EOF;
+ }
+
+ /* have we drained the response so far? */
+ if (APR_STATUS_IS_EAGAIN(status))
+ return APR_SUCCESS;
+
+ /* loop to read some more. */
+ }
+ /* NOTREACHED */
+}
+
+typedef struct {
+ apr_uint32_t *requests_outstanding;
+ serf_connection_t *connection;
+ apr_array_header_t *doc_queue;
+ serf_bucket_alloc_t *doc_queue_alloc;
+
+ apr_thread_cond_t *condvar;
+ apr_thread_mutex_t *mutex;
+
+ /* Master host: for now, we'll stick to one host. */
+ const char *hostinfo;
+
+ app_baton_t *app_ctx;
+} parser_baton_t;
+
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = setup_baton;
+ serf_bucket_t *hdrs_bkt;
+
+ *req_bkt = serf_bucket_request_create("GET", ctx->full_path, NULL,
+ serf_request_get_alloc(request));
+
+ hdrs_bkt = serf_bucket_request_get_headers(*req_bkt);
+
+ /* FIXME: Shouldn't we be able to figure out the host ourselves? */
+ serf_bucket_headers_setn(hdrs_bkt, "Host", ctx->hostinfo);
+ serf_bucket_headers_setn(hdrs_bkt, "User-Agent",
+ "Serf/" SERF_VERSION_STRING);
+
+ /* Shouldn't serf do this for us? */
+ serf_bucket_headers_setn(hdrs_bkt, "Accept-Encoding", "gzip");
+
+ if (ctx->app_ctx->authn != NULL) {
+ serf_bucket_headers_setn(hdrs_bkt, "Authorization",
+ ctx->app_ctx->authn);
+ }
+
+ if (ctx->app_ctx->using_ssl) {
+ serf_bucket_alloc_t *req_alloc;
+
+ req_alloc = serf_request_get_alloc(request);
+
+ if (ctx->app_ctx->ssl_ctx == NULL) {
+ *req_bkt = serf_bucket_ssl_encrypt_create(*req_bkt, NULL,
+ ctx->app_ctx->bkt_alloc);
+ ctx->app_ctx->ssl_ctx =
+ serf_bucket_ssl_encrypt_context_get(*req_bkt);
+ }
+ else {
+ *req_bkt =
+ serf_bucket_ssl_encrypt_create(*req_bkt, ctx->app_ctx->ssl_ctx,
+ ctx->app_ctx->bkt_alloc);
+ }
+ }
+
+
+#ifdef SERF_VERBOSE
+ printf("Url requesting: %s\n", ctx->full_path);
+#endif
+
+ *acceptor = ctx->acceptor;
+ *acceptor_baton = ctx->acceptor_baton;
+ *handler = ctx->handler;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t create_request(const char *hostinfo,
+ const char *path,
+ const char *query,
+ const char *fragment,
+ parser_baton_t *ctx,
+ apr_pool_t *tmppool)
+{
+ handler_baton_t *new_ctx;
+
+ if (hostinfo) {
+ /* Yes, this is a pointer comparison; not a string comparison. */
+ if (hostinfo != ctx->hostinfo) {
+ /* Not on the same host; ignore */
+ return APR_SUCCESS;
+ }
+ }
+
+ new_ctx = (handler_baton_t*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
+ sizeof(handler_baton_t));
+ new_ctx->allocator = ctx->app_ctx->bkt_alloc;
+ new_ctx->requests_outstanding = ctx->requests_outstanding;
+ new_ctx->app_ctx = ctx->app_ctx;
+
+    /* See above: this example restricts itself to the same vhost. */
+ new_ctx->hostinfo = ctx->hostinfo;
+
+ /* we need to copy it so it falls under the request's scope. */
+ new_ctx->path_len = strlen(path);
+ new_ctx->path = (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
+ new_ctx->path_len + 1);
+ memcpy(new_ctx->path, path, new_ctx->path_len + 1);
+
+ /* we need to copy it so it falls under the request's scope. */
+ if (query) {
+ new_ctx->query_len = strlen(query);
+ new_ctx->query = (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
+ new_ctx->query_len + 1);
+ memcpy(new_ctx->query, query, new_ctx->query_len + 1);
+ }
+ else {
+ new_ctx->query = NULL;
+ new_ctx->query_len = 0;
+ }
+
+ /* we need to copy it so it falls under the request's scope. */
+ if (fragment) {
+ new_ctx->fragment_len = strlen(fragment);
+ new_ctx->fragment =
+ (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
+ new_ctx->fragment_len + 1);
+ memcpy(new_ctx->fragment, fragment, new_ctx->fragment_len + 1);
+ }
+ else {
+ new_ctx->fragment = NULL;
+ new_ctx->fragment_len = 0;
+ }
+
+ if (!new_ctx->query) {
+ new_ctx->full_path = new_ctx->path;
+ new_ctx->full_path_len = new_ctx->path_len;
+ }
+ else {
+ new_ctx->full_path_len = new_ctx->path_len + new_ctx->query_len;
+ new_ctx->full_path =
+ (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
+ new_ctx->full_path_len + 1);
+ memcpy(new_ctx->full_path, new_ctx->path, new_ctx->path_len);
+ memcpy(new_ctx->full_path + new_ctx->path_len, new_ctx->query,
+ new_ctx->query_len + 1);
+ }
+
+ new_ctx->hdr_read = 0;
+
+ new_ctx->doc_queue_condvar = ctx->condvar;
+ new_ctx->doc_queue = ctx->doc_queue;
+ new_ctx->doc_queue_alloc = ctx->doc_queue_alloc;
+
+ new_ctx->acceptor = accept_response;
+ new_ctx->acceptor_baton = &ctx->app_ctx;
+ new_ctx->handler = handle_response;
+
+ apr_atomic_inc32(ctx->requests_outstanding);
+
+ serf_connection_request_create(ctx->connection, setup_request, new_ctx);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t put_req(const char *c, const char *orig_path,
+ parser_baton_t *ctx, apr_pool_t *pool)
+{
+ apr_status_t status;
+ apr_uri_t url;
+
+ /* Build url */
+#ifdef SERF_VERBOSE
+ printf("Url discovered: %s\n", c);
+#endif
+
+ status = apr_uri_parse(pool, c, &url);
+
+ /* We got something that was minimally useful. */
+ if (status == 0 && url.path) {
+ const char *path, *query, *fragment;
+
+ /* This is likely a relative URL. So, merge and hope for the
+ * best.
+ */
+ if (!url.hostinfo && url.path[0] != '/') {
+ struct iovec vec[2];
+ char *c;
+ apr_size_t nbytes;
+
+ c = strrchr(orig_path, '/');
+
+ /* assert c */
+ if (!c) {
+ return APR_EGENERAL;
+ }
+
+ vec[0].iov_base = (char*)orig_path;
+ vec[0].iov_len = c - orig_path + 1;
+
+ /* If the HTML is cute and gives us ./foo - skip the ./ */
+ if (url.path[0] == '.' && url.path[1] == '/') {
+ vec[1].iov_base = url.path + 2;
+ vec[1].iov_len = strlen(url.path + 2);
+ }
+ else if (url.path[0] == '.' && url.path[1] == '.') {
+ /* FIXME We could be cute and consolidate the path; we're a
+ * toy example. So no.
+ */
+ vec[1].iov_base = url.path;
+ vec[1].iov_len = strlen(url.path);
+ }
+ else {
+ vec[1].iov_base = url.path;
+ vec[1].iov_len = strlen(url.path);
+ }
+
+ path = apr_pstrcatv(pool, vec, 2, &nbytes);
+ }
+ else {
+ path = url.path;
+ }
+
+ query = url.query;
+ fragment = url.fragment;
+
+ return create_request(url.hostinfo, path, query, fragment, ctx, pool);
+ }
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t find_href(apr_xml_elem *e, const char *orig_path,
+ parser_baton_t *ctx, apr_pool_t *pool)
+{
+ apr_status_t status;
+
+ do {
+ /* print */
+ if (e->name[0] == 'a' && e->name[1] == '\0') {
+ apr_xml_attr *a;
+
+ a = e->attr;
+ while (a) {
+ if (strcasecmp(a->name, "href") == 0) {
+ break;
+ }
+ a = a->next;
+ }
+ if (a) {
+ status = put_req(a->value, orig_path, ctx, pool);
+ if (status) {
+ return status;
+ }
+ }
+ }
+
+ if (e->first_child) {
+ status = find_href(e->first_child, orig_path, ctx, pool);
+ if (status) {
+ return status;
+ }
+ }
+
+ e = e->next;
+ }
+ while (e);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t find_href_doc(apr_xml_doc *doc, const char *path,
+ parser_baton_t *ctx,
+ apr_pool_t *pool)
+{
+ return find_href(doc->root, path, ctx, pool);
+}
+
+static void * APR_THREAD_FUNC parser_thread(apr_thread_t *thread, void *data)
+{
+ apr_status_t status;
+ apr_pool_t *pool, *subpool;
+ parser_baton_t *ctx;
+
+ ctx = (parser_baton_t*)data;
+ pool = apr_thread_pool_get(thread);
+
+ apr_pool_create(&subpool, pool);
+
+ while (1) {
+ doc_path_t *dup;
+
+ apr_pool_clear(subpool);
+
+ /* Grab it. */
+ apr_thread_mutex_lock(ctx->mutex);
+ /* Sleep. */
+ apr_thread_cond_wait(ctx->condvar, ctx->mutex);
+
+ /* Fetch the doc off the list. */
+ if (ctx->doc_queue->nelts) {
+ dup = *(doc_path_t**)(apr_array_pop(ctx->doc_queue));
+ /* dup = (ctx->doc_queue->conns->elts)[0]; */
+ }
+ else {
+ dup = NULL;
+ }
+
+ /* Don't need the mutex now. */
+ apr_thread_mutex_unlock(ctx->mutex);
+
+ /* Parse the doc/url pair. */
+ if (dup) {
+ status = find_href_doc(dup->doc, dup->path, ctx, subpool);
+ if (status) {
+ printf("Error finding hrefs: %d %s\n", status, dup->path);
+ }
+ /* Free the doc pair and its pool. */
+ apr_pool_destroy(dup->pool);
+ serf_bucket_mem_free(ctx->doc_queue_alloc, dup->path);
+ serf_bucket_mem_free(ctx->doc_queue_alloc, dup);
+ }
+
+ /* Hey are we done? */
+ if (!apr_atomic_read32(ctx->requests_outstanding)) {
+ break;
+ }
+ }
+ return NULL;
+}
+
+static void print_usage(apr_pool_t *pool)
+{
+    puts("serf_spider [options] URL");
+ puts("-h\tDisplay this help");
+ puts("-v\tDisplay version");
+ puts("-H\tPrint response headers");
+ puts("-a <user:password> Present Basic authentication credentials");
+}
+
+int main(int argc, const char **argv)
+{
+ apr_status_t status;
+ apr_pool_t *pool;
+ apr_sockaddr_t *address;
+ serf_context_t *context;
+ serf_connection_t *connection;
+ app_baton_t app_ctx;
+ handler_baton_t *handler_ctx;
+ apr_uri_t url;
+ const char *raw_url, *method;
+ int count;
+ apr_getopt_t *opt;
+ char opt_c;
+ char *authn = NULL;
+ const char *opt_arg;
+
+ /* For the parser threads */
+ apr_thread_t *thread[3];
+ apr_threadattr_t *tattr;
+ apr_status_t parser_status;
+ parser_baton_t *parser_ctx;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ apr_pool_create(&pool, NULL);
+ apr_atomic_init(pool);
+ /* serf_initialize(); */
+
+ /* Default to one round of fetching. */
+ count = 1;
+ /* Default to GET. */
+ method = "GET";
+
+ apr_getopt_init(&opt, pool, argc, argv);
+
+ while ((status = apr_getopt(opt, "a:hv", &opt_c, &opt_arg)) ==
+ APR_SUCCESS) {
+ int srclen, enclen;
+
+ switch (opt_c) {
+ case 'a':
+ srclen = strlen(opt_arg);
+ enclen = apr_base64_encode_len(srclen);
+ authn = apr_palloc(pool, enclen + 6);
+ strcpy(authn, "Basic ");
+ (void) apr_base64_encode(&authn[6], opt_arg, srclen);
+ break;
+ case 'h':
+ print_usage(pool);
+ exit(0);
+ break;
+ case 'v':
+ puts("Serf version: " SERF_VERSION_STRING);
+ exit(0);
+ default:
+ break;
+ }
+ }
+
+ if (opt->ind != opt->argc - 1) {
+ print_usage(pool);
+ exit(-1);
+ }
+
+ raw_url = argv[opt->ind];
+
+ apr_uri_parse(pool, raw_url, &url);
+ if (!url.port) {
+ url.port = apr_uri_port_of_scheme(url.scheme);
+ }
+ if (!url.path) {
+ url.path = "/";
+ }
+
+ if (strcasecmp(url.scheme, "https") == 0) {
+ app_ctx.using_ssl = 1;
+ }
+ else {
+ app_ctx.using_ssl = 0;
+ }
+
+ status = apr_sockaddr_info_get(&address,
+ url.hostname, APR_UNSPEC, url.port, 0,
+ pool);
+ if (status) {
+ printf("Error creating address: %d\n", status);
+ exit(1);
+ }
+
+ context = serf_context_create(pool);
+
+ /* ### Connection or Context should have an allocator? */
+ app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
+ app_ctx.ssl_ctx = NULL;
+ app_ctx.authn = authn;
+
+ connection = serf_connection_create(context, address,
+ conn_setup, &app_ctx,
+ closed_connection, &app_ctx,
+ pool);
+
+ handler_ctx = (handler_baton_t*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
+ sizeof(handler_baton_t));
+ handler_ctx->allocator = app_ctx.bkt_alloc;
+ handler_ctx->doc_queue = apr_array_make(pool, 1, sizeof(doc_path_t*));
+ handler_ctx->doc_queue_alloc = app_ctx.bkt_alloc;
+
+ handler_ctx->requests_outstanding =
+ (apr_uint32_t*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
+ sizeof(apr_uint32_t));
+ apr_atomic_set32(handler_ctx->requests_outstanding, 0);
+ handler_ctx->hdr_read = 0;
+
+ parser_ctx = (void*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
+ sizeof(parser_baton_t));
+
+ parser_ctx->requests_outstanding = handler_ctx->requests_outstanding;
+ parser_ctx->connection = connection;
+ parser_ctx->app_ctx = &app_ctx;
+ parser_ctx->doc_queue = handler_ctx->doc_queue;
+ parser_ctx->doc_queue_alloc = handler_ctx->doc_queue_alloc;
+ /* Restrict ourselves to this host. */
+ parser_ctx->hostinfo = url.hostinfo;
+
+ status = apr_thread_mutex_create(&parser_ctx->mutex,
+ APR_THREAD_MUTEX_DEFAULT, pool);
+ if (status) {
+ printf("Couldn't create mutex %d\n", status);
+ return status;
+ }
+
+ status = apr_thread_cond_create(&parser_ctx->condvar, pool);
+ if (status) {
+ printf("Couldn't create condvar: %d\n", status);
+ return status;
+ }
+
+    /* Let the handler know which condvar to use. */
+ handler_ctx->doc_queue_condvar = parser_ctx->condvar;
+
+ apr_threadattr_create(&tattr, pool);
+
+ /* Start the parser thread. */
+ apr_thread_create(&thread[0], tattr, parser_thread, parser_ctx, pool);
+
+ /* Deliver the first request. */
+ create_request(url.hostinfo, url.path, NULL, NULL, parser_ctx, pool);
+
+ /* Go run our normal thread. */
+ while (1) {
+ int tries = 0;
+
+ status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ continue;
+ if (status) {
+ char buf[200];
+
+ printf("Error running context: (%d) %s\n", status,
+ apr_strerror(status, buf, sizeof(buf)));
+ exit(1);
+ }
+
+ /* We run this check to allow our parser threads to add more
+ * requests to our queue.
+ */
+ for (tries = 0; tries < 3; tries++) {
+ if (!apr_atomic_read32(handler_ctx->requests_outstanding)) {
+#ifdef SERF_VERBOSE
+ printf("Waiting...");
+#endif
+ apr_sleep(100000);
+#ifdef SERF_VERBOSE
+ printf("Done\n");
+#endif
+ }
+ else {
+ break;
+ }
+ }
+ if (tries >= 3) {
+ break;
+ }
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(app_ctx.bkt_alloc);
+ }
+
+ printf("Quitting...\n");
+ serf_connection_close(connection);
+
+ /* wake up the parser via condvar signal */
+ apr_thread_cond_signal(parser_ctx->condvar);
+
+ status = apr_thread_join(&parser_status, thread[0]);
+ if (status) {
+ printf("Error joining thread: %d\n", status);
+ return status;
+ }
+
+ serf_bucket_mem_free(app_ctx.bkt_alloc, handler_ctx->requests_outstanding);
+ serf_bucket_mem_free(app_ctx.bkt_alloc, parser_ctx);
+
+ apr_pool_destroy(pool);
+ return 0;
+}
diff --git a/test/serftestca.pem b/test/serftestca.pem
new file mode 100644
index 0000000..15aa004
--- /dev/null
+++ b/test/serftestca.pem
@@ -0,0 +1,66 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number:
+ c2:31:db:41:c9:7b:a9:46
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=BE, ST=Antwerp, O=In Serf we trust, Inc., OU=Test Suite, CN=Serf/emailAddress=serf@example.com, L=Mechelen
+ Validity
+ Not Before: Mar 21 13:18:17 2008 GMT
+ Not After : Mar 21 13:18:17 2011 GMT
+ Subject: C=BE, ST=Antwerp, O=In Serf we trust, Inc., OU=Test Suite, CN=Serf/emailAddress=serf@example.com, L=Mechelen
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public Key: (1024 bit)
+ Modulus (1024 bit):
+ 00:a1:21:58:60:ea:79:ae:9f:0f:f4:69:b5:af:55:
+ 9a:8b:da:1d:74:80:88:44:42:46:64:59:98:3e:84:
+ e1:70:f7:18:e1:7c:8d:cc:42:27:cd:e6:31:47:51:
+ 66:3d:58:1c:f9:54:26:4f:12:b7:0e:46:a7:27:c1:
+ ca:ac:a7:38:0f:a1:00:fb:a8:20:77:37:14:6a:7b:
+ 65:34:1c:eb:30:fa:0b:9e:57:2c:7a:04:13:50:d4:
+ e2:7c:66:5f:97:45:75:78:47:f5:9e:68:9d:40:b9:
+ 94:d3:78:50:c8:19:10:50:52:fd:2f:b8:a1:75:74:
+ ad:73:95:46:9a:8e:95:b2:3d
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Subject Key Identifier:
+ 15:BB:2D:8C:63:10:0D:31:01:D1:C6:26:01:27:43:A5:F4:57:6E:ED
+ X509v3 Authority Key Identifier:
+ keyid:15:BB:2D:8C:63:10:0D:31:01:D1:C6:26:01:27:43:A5:F4:57:6E:ED
+ DirName:/C=BE/ST=Antwerp/O=In Serf we trust, Inc./OU=Test Suite/CN=Serf/emailAddress=serf@example.com/L=Mechelen
+ serial:C2:31:DB:41:C9:7B:A9:46
+
+ X509v3 Basic Constraints:
+ CA:TRUE
+ Signature Algorithm: sha1WithRSAEncryption
+ 59:9c:b0:62:cc:a4:c0:98:68:4b:52:bf:fa:84:ee:b5:65:d5:
+ a7:51:39:77:a0:be:d6:14:b0:7a:64:2f:0d:ee:49:e8:b6:6a:
+ c7:1d:5f:bc:27:4c:25:4b:25:b7:69:5c:07:86:54:69:22:99:
+ d9:1a:5d:dd:38:c9:00:b4:29:89:7d:ce:df:b5:3f:57:05:ee:
+ 5b:0e:a4:f0:bc:7a:4f:1b:ba:84:85:e8:0f:e3:6b:fa:6f:cf:
+ 3f:23:7c:d0:dd:c8:95:91:46:8a:05:84:84:46:cf:e3:c8:fc:
+ 9c:94:c2:dd:15:d4:6e:d1:31:0b:d9:7b:ce:1e:13:72:c9:2e:
+ a9:86
+-----BEGIN CERTIFICATE-----
+MIIDsjCCAxugAwIBAgIJAMIx20HJe6lGMA0GCSqGSIb3DQEBBQUAMIGYMQswCQYD
+VQQGEwJCRTEQMA4GA1UECBMHQW50d2VycDEfMB0GA1UEChMWSW4gU2VyZiB3ZSB0
+cnVzdCwgSW5jLjETMBEGA1UECxMKVGVzdCBTdWl0ZTENMAsGA1UEAxMEU2VyZjEf
+MB0GCSqGSIb3DQEJARYQc2VyZkBleGFtcGxlLmNvbTERMA8GA1UEBxMITWVjaGVs
+ZW4wHhcNMDgwMzIxMTMxODE3WhcNMTEwMzIxMTMxODE3WjCBmDELMAkGA1UEBhMC
+QkUxEDAOBgNVBAgTB0FudHdlcnAxHzAdBgNVBAoTFkluIFNlcmYgd2UgdHJ1c3Qs
+IEluYy4xEzARBgNVBAsTClRlc3QgU3VpdGUxDTALBgNVBAMTBFNlcmYxHzAdBgkq
+hkiG9w0BCQEWEHNlcmZAZXhhbXBsZS5jb20xETAPBgNVBAcTCE1lY2hlbGVuMIGf
+MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQChIVhg6nmunw/0abWvVZqL2h10gIhE
+QkZkWZg+hOFw9xjhfI3MQifN5jFHUWY9WBz5VCZPErcORqcnwcqspzgPoQD7qCB3
+NxRqe2U0HOsw+gueVyx6BBNQ1OJ8Zl+XRXV4R/WeaJ1AuZTTeFDIGRBQUv0vuKF1
+dK1zlUaajpWyPQIDAQABo4IBADCB/TAdBgNVHQ4EFgQUFbstjGMQDTEB0cYmASdD
+pfRXbu0wgc0GA1UdIwSBxTCBwoAUFbstjGMQDTEB0cYmASdDpfRXbu2hgZ6kgZsw
+gZgxCzAJBgNVBAYTAkJFMRAwDgYDVQQIEwdBbnR3ZXJwMR8wHQYDVQQKExZJbiBT
+ZXJmIHdlIHRydXN0LCBJbmMuMRMwEQYDVQQLEwpUZXN0IFN1aXRlMQ0wCwYDVQQD
+EwRTZXJmMR8wHQYJKoZIhvcNAQkBFhBzZXJmQGV4YW1wbGUuY29tMREwDwYDVQQH
+EwhNZWNoZWxlboIJAMIx20HJe6lGMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADgYEAWZywYsykwJhoS1K/+oTutWXVp1E5d6C+1hSwemQvDe5J6LZqxx1fvCdM
+JUslt2lcB4ZUaSKZ2Rpd3TjJALQpiX3O37U/VwXuWw6k8Lx6Txu6hIXoD+Nr+m/P
+PyN80N3IlZFGigWEhEbP48j8nJTC3RXUbtExC9l7zh4TcskuqYY=
+-----END CERTIFICATE-----
diff --git a/test/server/test_server.c b/test/server/test_server.c
new file mode 100644
index 0000000..1ad2c9e
--- /dev/null
+++ b/test/server/test_server.c
@@ -0,0 +1,359 @@
+/* Copyright 2011 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr.h"
+#include "apr_pools.h"
+#include <apr_poll.h>
+#include <apr_version.h>
+#include <stdlib.h>
+
+#include "serf.h"
+
+#include "test_server.h"
+
+struct serv_ctx_t {
+ /* Pool for resource allocation. */
+ apr_pool_t *pool;
+
+ apr_int32_t options;
+
+    /* Array of actions the server will replay when a client connects. */
+ test_server_action_t *action_list;
+ /* Size of action_list array. */
+ apr_size_t action_count;
+ /* Index of current action. */
+ apr_size_t cur_action;
+
+ /* Array of messages the server will receive from the client. */
+ test_server_message_t *message_list;
+ /* Size of message_list array. */
+ apr_size_t message_count;
+ /* Index of current message. */
+ apr_size_t cur_message;
+
+ /* Number of messages received that the server didn't respond to yet. */
+ apr_size_t outstanding_responses;
+
+ /* Position in message buffer (incoming messages being read). */
+ apr_size_t message_buf_pos;
+
+    /* Position in action buffer (outgoing messages being sent). */
+ apr_size_t action_buf_pos;
+
+ /* Address for server binding. */
+ apr_sockaddr_t *serv_addr;
+ apr_socket_t *serv_sock;
+
+ /* Accepted client socket. NULL if there is no client socket. */
+ apr_socket_t *client_sock;
+
+};
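+
+/* How replay works, in brief: message_list[cur_message] is the next request
+   text the server expects to read from the client, and
+   action_list[cur_action] is the next thing the server will do (send a
+   canned response, kill the connection, ...).  replay() below advances
+   cur_message and cur_action as data is received and sent. */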
+
+/* Replay support functions */
+static void next_message(serv_ctx_t *servctx)
+{
+ servctx->cur_message++;
+}
+
+static void next_action(serv_ctx_t *servctx)
+{
+ servctx->cur_action++;
+ servctx->action_buf_pos = 0;
+}
+
+/* Verify received requests and take the necessary actions
+ (return a response, kill the connection ...) */
+static apr_status_t replay(serv_ctx_t *servctx,
+ apr_int16_t rtnevents,
+ apr_pool_t *pool)
+{
+ apr_status_t status = APR_SUCCESS;
+ test_server_action_t *action;
+
+ if (rtnevents & APR_POLLIN) {
+ if (servctx->message_list == NULL) {
+ /* we're not expecting any requests to reach this server! */
+ printf("Received request where none was expected\n");
+
+ return APR_EGENERAL;
+ }
+
+ if (servctx->cur_action >= servctx->action_count) {
+ char buf[128];
+ apr_size_t len = sizeof(buf);
+
+ status = apr_socket_recv(servctx->client_sock, buf, &len);
+ if (! APR_STATUS_IS_EAGAIN(status)) {
+ /* we're out of actions! */
+ printf("Received more requests than expected.\n");
+
+ return APR_EGENERAL;
+ }
+ return status;
+ }
+
+ action = &servctx->action_list[servctx->cur_action];
+
+ if (action->kind == SERVER_IGNORE_AND_KILL_CONNECTION) {
+ char buf[128];
+ apr_size_t len = sizeof(buf);
+
+ status = apr_socket_recv(servctx->client_sock, buf, &len);
+
+ if (status == APR_EOF) {
+ apr_socket_close(servctx->client_sock);
+ servctx->client_sock = NULL;
+ next_action(servctx);
+ return APR_SUCCESS;
+ }
+
+ return status;
+ }
+ else if (action->kind == SERVER_RECV ||
+ (action->kind == SERVER_RESPOND &&
+ servctx->outstanding_responses == 0)) {
+ apr_size_t msg_len, len;
+ char buf[128];
+ test_server_message_t *message;
+
+ message = &servctx->message_list[servctx->cur_message];
+ msg_len = strlen(message->text);
+
+ len = msg_len - servctx->message_buf_pos;
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ status = apr_socket_recv(servctx->client_sock, buf, &len);
+ if (status != APR_SUCCESS)
+ return status;
+
+ if (servctx->options & TEST_SERVER_DUMP)
+ fwrite(buf, len, 1, stdout);
+
+ if (strncmp(buf, message->text + servctx->message_buf_pos, len) != 0) {
+ /* ## TODO: Better diagnostics. */
+ printf("Expected: (\n");
+ fwrite(message->text + servctx->message_buf_pos, len, 1, stdout);
+ printf(")\n");
+ printf("Actual: (\n");
+ fwrite(buf, len, 1, stdout);
+ printf(")\n");
+
+ return APR_EGENERAL;
+ }
+
+ servctx->message_buf_pos += len;
+
+ if (servctx->message_buf_pos >= msg_len) {
+ next_message(servctx);
+ servctx->message_buf_pos -= msg_len;
+ if (action->kind == SERVER_RESPOND)
+ servctx->outstanding_responses++;
+ if (action->kind == SERVER_RECV)
+ next_action(servctx);
+ }
+ }
+ }
+ if (rtnevents & APR_POLLOUT) {
+ action = &servctx->action_list[servctx->cur_action];
+
+ if (action->kind == SERVER_RESPOND && servctx->outstanding_responses) {
+ apr_size_t msg_len;
+ apr_size_t len;
+
+ msg_len = strlen(action->text);
+ len = msg_len - servctx->action_buf_pos;
+
+ status = apr_socket_send(servctx->client_sock,
+ action->text + servctx->action_buf_pos, &len);
+ if (status != APR_SUCCESS)
+ return status;
+
+ if (servctx->options & TEST_SERVER_DUMP)
+ fwrite(action->text + servctx->action_buf_pos, len, 1, stdout);
+
+ servctx->action_buf_pos += len;
+
+ if (servctx->action_buf_pos >= msg_len) {
+ next_action(servctx);
+ servctx->outstanding_responses--;
+ }
+ }
+ else if (action->kind == SERVER_KILL_CONNECTION ||
+ action->kind == SERVER_IGNORE_AND_KILL_CONNECTION) {
+ apr_socket_close(servctx->client_sock);
+ servctx->client_sock = NULL;
+ next_action(servctx);
+ }
+ }
+ else if (rtnevents & APR_POLLIN) {
+ /* ignore */
+ }
+ else {
+ printf("Unknown rtnevents: %d\n", rtnevents);
+ abort();
+ }
+
+ return status;
+}
+
+apr_status_t test_server_run(serv_ctx_t *servctx,
+ apr_short_interval_time_t duration,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ apr_pollset_t *pollset;
+ apr_int32_t num;
+ const apr_pollfd_t *desc;
+
+ /* create a new pollset */
+ status = apr_pollset_create(&pollset, 32, pool, 0);
+ if (status != APR_SUCCESS)
+ return status;
+
+    /* Don't accept new connections while processing a client connection,
+       at least for the present time. */
+ if (servctx->client_sock) {
+ apr_pollfd_t pfd = { pool, APR_POLL_SOCKET, APR_POLLIN | APR_POLLOUT, 0,
+ { NULL }, NULL };
+ pfd.desc.s = servctx->client_sock;
+ status = apr_pollset_add(pollset, &pfd);
+ if (status != APR_SUCCESS)
+ goto cleanup;
+ }
+ else {
+ apr_pollfd_t pfd = { pool, APR_POLL_SOCKET, APR_POLLIN, 0,
+ { NULL }, NULL };
+ pfd.desc.s = servctx->serv_sock;
+ status = apr_pollset_add(pollset, &pfd);
+ if (status != APR_SUCCESS)
+ goto cleanup;
+ }
+
+ status = apr_pollset_poll(pollset, APR_USEC_PER_SEC >> 1, &num, &desc);
+ if (status != APR_SUCCESS)
+ goto cleanup;
+
+ while (num--) {
+ if (desc->desc.s == servctx->serv_sock) {
+ status = apr_socket_accept(&servctx->client_sock, servctx->serv_sock,
+ servctx->pool);
+ if (status != APR_SUCCESS)
+ goto cleanup;
+
+ apr_socket_opt_set(servctx->client_sock, APR_SO_NONBLOCK, 1);
+ apr_socket_timeout_set(servctx->client_sock, 0);
+
+ status = APR_SUCCESS;
+ goto cleanup;
+ }
+
+ if (desc->desc.s == servctx->client_sock) {
+ /* Replay data to socket. */
+ status = replay(servctx, desc->rtnevents, pool);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ apr_socket_close(servctx->client_sock);
+ servctx->client_sock = NULL;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status)) {
+ status = APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS) {
+ /* Real error. */
+ goto cleanup;
+ }
+ }
+
+ desc++;
+ }
+
+cleanup:
+ apr_pollset_destroy(pollset);
+
+ return status;
+}
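+
+/* test_server_run() performs a single poll/dispatch pass (with a half-second
+   poll timeout); the tests call it repeatedly in a loop, interleaved with
+   serf_context_run(). */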
+
+/* Start a TCP server listening at ADDRESS. MESSAGE_LIST holds the
+   MESSAGE_COUNT requests the server expects to receive from the client;
+   ACTION_LIST holds the ACTION_COUNT actions to replay once a client
+   connects. */
+apr_status_t test_start_server(serv_ctx_t **servctx_p,
+ apr_sockaddr_t *address,
+ test_server_message_t *message_list,
+ apr_size_t message_count,
+ test_server_action_t *action_list,
+ apr_size_t action_count,
+ apr_int32_t options,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ apr_socket_t *serv_sock;
+ serv_ctx_t *servctx;
+
+ servctx = apr_pcalloc(pool, sizeof(*servctx));
+ *servctx_p = servctx;
+
+ servctx->serv_addr = address;
+ servctx->options = options;
+ servctx->pool = pool;
+ servctx->message_list = message_list;
+ servctx->message_count = message_count;
+ servctx->action_list = action_list;
+ servctx->action_count = action_count;
+
+ /* create server socket */
+#if APR_VERSION_AT_LEAST(1, 0, 0)
+ status = apr_socket_create(&serv_sock, address->family, SOCK_STREAM, 0,
+ pool);
+#else
+ status = apr_socket_create(&serv_sock, address->family, SOCK_STREAM, pool);
+#endif
+
+ if (status != APR_SUCCESS)
+ return status;
+
+ apr_socket_opt_set(serv_sock, APR_SO_NONBLOCK, 1);
+ apr_socket_timeout_set(serv_sock, 0);
+ apr_socket_opt_set(serv_sock, APR_SO_REUSEADDR, 1);
+
+ status = apr_socket_bind(serv_sock, servctx->serv_addr);
+ if (status != APR_SUCCESS)
+ return status;
+
+ /* Start replay from first action. */
+ servctx->cur_action = 0;
+ servctx->action_buf_pos = 0;
+ servctx->outstanding_responses = 0;
+
+    /* listen for clients */
+    status = apr_socket_listen(serv_sock, SOMAXCONN);
+    if (status != APR_SUCCESS)
+        return status;
+
+ servctx->serv_sock = serv_sock;
+ servctx->client_sock = NULL;
+ return APR_SUCCESS;
+}
+
+apr_status_t test_server_destroy(serv_ctx_t *servctx, apr_pool_t *pool)
+{
+ apr_socket_close(servctx->serv_sock);
+
+ if (servctx->client_sock) {
+ apr_socket_close(servctx->client_sock);
+ }
+
+ return APR_SUCCESS;
+}
diff --git a/test/server/test_server.h b/test/server/test_server.h
new file mode 100644
index 0000000..9e8b0e2
--- /dev/null
+++ b/test/server/test_server.h
@@ -0,0 +1,70 @@
+/* Copyright 2011 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TEST_SERVER_H
+#define TEST_SERVER_H
+
+typedef struct serv_ctx_t serv_ctx_t;
+
+#define TEST_SERVER_DUMP 1
+
+/* Default port for our test server. */
+#define SERV_PORT 12345
+#define SERV_PORT_STR "12345"
+
+#define PROXY_PORT 23456
+
+typedef struct
+{
+ enum {
+ SERVER_RECV,
+ SERVER_SEND,
+ SERVER_RESPOND,
+ SERVER_IGNORE_AND_KILL_CONNECTION,
+ SERVER_KILL_CONNECTION
+ } kind;
+
+ const char *text;
+} test_server_action_t;
+
+typedef struct
+{
+ const char *text;
+} test_server_message_t;
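+
+/* Example (illustration only): a server that expects a single GET and
+   answers with an empty 200 response could be described as
+
+     test_server_message_t messages[] = {
+         { "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n" }
+     };
+     test_server_action_t actions[] = {
+         { SERVER_RESPOND, "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n" }
+     };
+
+   and handed to test_start_server() below. */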
+
+apr_status_t test_start_server(serv_ctx_t **servctx_p,
+ apr_sockaddr_t *address,
+ test_server_message_t *message_list,
+ apr_size_t message_count,
+ test_server_action_t *action_list,
+ apr_size_t action_count,
+ apr_int32_t options,
+ apr_pool_t *pool);
+
+apr_status_t test_server_run(serv_ctx_t *servctx,
+ apr_short_interval_time_t duration,
+ apr_pool_t *pool);
+
+apr_status_t test_server_destroy(serv_ctx_t *servctx, apr_pool_t *pool);
+
+#ifndef APR_VERSION_AT_LEAST /* Introduced in APR 1.3.0 */
+#define APR_VERSION_AT_LEAST(major,minor,patch) \
+(((major) < APR_MAJOR_VERSION) \
+ || ((major) == APR_MAJOR_VERSION && (minor) < APR_MINOR_VERSION) \
+ || ((major) == APR_MAJOR_VERSION && (minor) == APR_MINOR_VERSION && \
+ (patch) <= APR_PATCH_VERSION))
+#endif /* APR_VERSION_AT_LEAST */
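+
+/* For example, APR_VERSION_AT_LEAST(1, 3, 0) is true when compiling against
+   APR 1.3.0 or newer; test_server.c relies on APR_VERSION_AT_LEAST(1, 0, 0)
+   to pick the right apr_socket_create() signature. */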
+
+#endif /* TEST_SERVER_H */
diff --git a/test/test_all.c b/test/test_all.c
new file mode 100644
index 0000000..4cc4c8c
--- /dev/null
+++ b/test/test_all.c
@@ -0,0 +1,101 @@
+/* Copyright 2002-2007 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr.h"
+#include "apr_pools.h"
+#include "test_serf.h"
+#include <stdlib.h>
+
+static const struct testlist {
+ const char *testname;
+ CuSuite *(*func)(void);
+} tests[] = {
+ {"context", test_context},
+ {"buckets", test_buckets},
+ {"ssl", test_ssl},
+ {"LastTest", NULL}
+};
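+
+/* Command-line behaviour, as implemented in main() below:
+     test_all              run every suite
+     test_all -l           list the available suite names and exit
+     test_all <suite>...   run only the named suites */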
+
+int main(int argc, char *argv[])
+{
+ CuSuite *alltests = NULL;
+ CuString *output = CuStringNew();
+ int i;
+ int list_provided = 0;
+ int exit_code;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ for (i = 1; i < argc; i++) {
+ if (!strcmp(argv[i], "-v")) {
+ continue;
+ }
+ if (!strcmp(argv[i], "-l")) {
+ for (i = 0; tests[i].func != NULL; i++) {
+ printf("%s\n", tests[i].testname);
+ }
+ exit(0);
+ }
+ if (argv[i][0] == '-') {
+ fprintf(stderr, "invalid option: `%s'\n", argv[i]);
+ exit(1);
+ }
+ list_provided = 1;
+ }
+
+ alltests = CuSuiteNew();
+ if (!list_provided) {
+ /* add everything */
+ for (i = 0; tests[i].func != NULL; i++) {
+ CuSuite *st = tests[i].func();
+ CuSuiteAddSuite(alltests, st);
+ CuSuiteFree(st);
+ }
+ }
+ else {
+ /* add only the tests listed */
+ for (i = 1; i < argc; i++) {
+ int j;
+ int found = 0;
+
+ if (argv[i][0] == '-') {
+ continue;
+ }
+ for (j = 0; tests[j].func != NULL; j++) {
+ if (!strcmp(argv[i], tests[j].testname)) {
+ CuSuiteAddSuite(alltests, tests[j].func());
+ found = 1;
+ }
+ }
+ if (!found) {
+ fprintf(stderr, "invalid test name: `%s'\n", argv[i]);
+ exit(1);
+ }
+ }
+ }
+
+ CuSuiteRun(alltests);
+ CuSuiteSummary(alltests, output);
+ CuSuiteDetails(alltests, output);
+ printf("%s\n", output->buffer);
+
+ exit_code = alltests->failCount > 0 ? 1 : 0;
+
+ CuSuiteFreeDeep(alltests);
+ CuStringFree(output);
+
+ return exit_code;
+}
diff --git a/test/test_buckets.c b/test/test_buckets.c
new file mode 100644
index 0000000..7d4f7ac
--- /dev/null
+++ b/test/test_buckets.c
@@ -0,0 +1,425 @@
+/* Copyright 2002-2007 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr.h>
+#include <apr_pools.h>
+#include <apr_strings.h>
+
+#include "serf.h"
+#include "test_serf.h"
+
+#define CRLF "\r\n"
+
+static void test_simple_bucket_readline(CuTest *tc)
+{
+ apr_status_t status;
+ serf_bucket_t *bkt;
+ const char *data;
+ int found;
+ apr_size_t len;
+
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+
+ bkt = SERF_BUCKET_SIMPLE_STRING(
+ "line1" CRLF
+ "line2",
+ alloc);
+
+    /* Seed the output parameters with junk to verify they get overwritten. */
+ len = 0x112233;
+ data = 0;
+ status = serf_bucket_readline(bkt, SERF_NEWLINE_CRLF, &found, &data, &len);
+
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, SERF_NEWLINE_CRLF, found);
+ CuAssertIntEquals(tc, 7, len);
+ CuAssert(tc, data, strncmp("line1" CRLF, data, len) == 0);
+
+    /* Seed the output parameters with junk to verify they get overwritten. */
+ len = 0x112233;
+ data = 0;
+ status = serf_bucket_readline(bkt, SERF_NEWLINE_CRLF, &found, &data, &len);
+
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, SERF_NEWLINE_NONE, found);
+ CuAssertIntEquals(tc, 5, len);
+ CuAssert(tc, data, strncmp("line2", data, len) == 0);
+ test_teardown(test_pool);
+}
+
+/* Reads the bucket until EOF and compares the data read with the
+   zero-terminated string EXPECTED. Reports all failures via CuTest. */
+static void read_and_check_bucket(CuTest *tc, serf_bucket_t *bkt,
+ const char *expected)
+{
+ apr_status_t status;
+ do
+ {
+ const char *data;
+ apr_size_t len;
+
+ status = serf_bucket_read(bkt, SERF_READ_ALL_AVAIL, &data, &len);
+ CuAssert(tc, "Got error during bucket reading.",
+ !SERF_BUCKET_READ_ERROR(status));
+ CuAssert(tc, "Read more data than expected.",
+ strlen(expected) >= len);
+ CuAssert(tc, "Read data is not equal to expected.",
+ strncmp(expected, data, len) == 0);
+
+ expected += len;
+ } while(!APR_STATUS_IS_EOF(status));
+
+ CuAssert(tc, "Read less data than expected.", strlen(expected) == 0);
+}
+
+static void test_response_bucket_read(CuTest *tc)
+{
+ serf_bucket_t *bkt, *tmp;
+
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+
+ tmp = SERF_BUCKET_SIMPLE_STRING(
+ "HTTP/1.1 200 OK" CRLF
+ "Content-Length: 7" CRLF
+ CRLF
+ "abc1234",
+ alloc);
+
+ bkt = serf_bucket_response_create(tmp, alloc);
+
+    /* Read the whole bucket and check its content. */
+ read_and_check_bucket(tc, bkt, "abc1234");
+ test_teardown(test_pool);
+}
+
+static void test_response_bucket_headers(CuTest *tc)
+{
+ serf_bucket_t *bkt, *tmp, *hdr;
+
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+
+ tmp = SERF_BUCKET_SIMPLE_STRING(
+ "HTTP/1.1 405 Method Not Allowed" CRLF
+ "Date: Sat, 12 Jun 2010 14:17:10 GMT" CRLF
+ "Server: Apache" CRLF
+ "Allow: " CRLF
+ "Content-Length: 7" CRLF
+ "Content-Type: text/html; charset=iso-8859-1" CRLF
+ "NoSpace:" CRLF
+ CRLF
+ "abc1234",
+ alloc);
+
+ bkt = serf_bucket_response_create(tmp, alloc);
+
+    /* Read the whole bucket and check its content. */
+ read_and_check_bucket(tc, bkt, "abc1234");
+
+ hdr = serf_bucket_response_get_headers(bkt);
+ CuAssertStrEquals(tc,
+ "",
+ serf_bucket_headers_get(hdr, "Allow"));
+ CuAssertStrEquals(tc,
+ "7",
+ serf_bucket_headers_get(hdr, "Content-Length"));
+ CuAssertStrEquals(tc,
+ "",
+ serf_bucket_headers_get(hdr, "NoSpace"));
+ test_teardown(test_pool);
+}
+
+static void test_response_bucket_chunked_read(CuTest *tc)
+{
+ serf_bucket_t *bkt, *tmp, *hdrs;
+
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+
+ tmp = SERF_BUCKET_SIMPLE_STRING(
+ "HTTP/1.1 200 OK" CRLF
+ "Transfer-Encoding: chunked" CRLF
+ CRLF
+ "3" CRLF
+ "abc" CRLF
+ "4" CRLF
+ "1234" CRLF
+ "0" CRLF
+ "Footer: value" CRLF
+ CRLF,
+ alloc);
+
+ bkt = serf_bucket_response_create(tmp, alloc);
+
+    /* Read the whole bucket and check its content. */
+ read_and_check_bucket(tc, bkt, "abc1234");
+
+ hdrs = serf_bucket_response_get_headers(bkt);
+ CuAssertTrue(tc, hdrs != NULL);
+
+    /* Check that the trailing headers were parsed correctly. */
+ CuAssertStrEquals(tc, "value", serf_bucket_headers_get(hdrs, "Footer"));
+ test_teardown(test_pool);
+}
+
+static void test_bucket_header_set(CuTest *tc)
+{
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+ serf_bucket_t *hdrs = serf_bucket_headers_create(alloc);
+
+ CuAssertTrue(tc, hdrs != NULL);
+
+ serf_bucket_headers_set(hdrs, "Foo", "bar");
+
+ CuAssertStrEquals(tc, "bar", serf_bucket_headers_get(hdrs, "Foo"));
+
+ serf_bucket_headers_set(hdrs, "Foo", "baz");
+
+ CuAssertStrEquals(tc, "bar,baz", serf_bucket_headers_get(hdrs, "Foo"));
+
+ serf_bucket_headers_set(hdrs, "Foo", "test");
+
+ CuAssertStrEquals(tc, "bar,baz,test", serf_bucket_headers_get(hdrs, "Foo"));
+
+    /* Header names are case insensitive. */
+ CuAssertStrEquals(tc, "bar,baz,test", serf_bucket_headers_get(hdrs, "fOo"));
+ test_teardown(test_pool);
+}
+
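+/* Helper: keep reading from BKT until REQUESTED bytes have been collected or
+   EOF is reached.  The collected data, allocated from POOL, is returned in
+   *BUF and its length in *LEN. */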
+static apr_status_t read_requested_bytes(serf_bucket_t *bkt,
+ apr_size_t requested,
+ const char **buf,
+ apr_size_t *len,
+ apr_pool_t *pool)
+{
+ apr_size_t current = 0;
+ const char *tmp;
+ const char *data;
+ apr_status_t status = APR_SUCCESS;
+
+ tmp = apr_pcalloc(pool, requested);
+ while (current < requested) {
+        status = serf_bucket_read(bkt, requested - current, &data, len);
+ memcpy((void*)(tmp + current), (void*)data, *len);
+ current += *len;
+ if (APR_STATUS_IS_EOF(status))
+ break;
+ }
+
+ *buf = tmp;
+ *len = current;
+ return status;
+}
+
+
+static void test_iovec_buckets(CuTest *tc)
+{
+ apr_status_t status;
+ serf_bucket_t *bkt, *iobkt;
+ const char *data;
+ apr_size_t len;
+ struct iovec vecs[32];
+ struct iovec tgt_vecs[32];
+ int i;
+ int vecs_used;
+
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+
+    /* Test 1: Read a single string into an iovec, store it in an iovec
+       bucket and then read it back. */
+ bkt = SERF_BUCKET_SIMPLE_STRING(
+ "line1" CRLF
+ "line2",
+ alloc);
+
+ status = serf_bucket_read_iovec(bkt, SERF_READ_ALL_AVAIL, 32, vecs,
+ &vecs_used);
+
+ iobkt = serf_bucket_iovec_create(vecs, vecs_used, alloc);
+
+ /* Check available data */
+ status = serf_bucket_peek(iobkt, &data, &len);
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, strlen("line1" CRLF "line2"), len);
+
+ /* Try to read only a few bytes (less than what's in the first buffer). */
+ status = serf_bucket_read_iovec(iobkt, 3, 32, tgt_vecs, &vecs_used);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 1, vecs_used);
+ CuAssertIntEquals(tc, 3, tgt_vecs[0].iov_len);
+ CuAssert(tc, tgt_vecs[0].iov_base,
+ strncmp("lin", tgt_vecs[0].iov_base, tgt_vecs[0].iov_len) == 0);
+
+ /* Read the rest of the data. */
+ status = serf_bucket_read_iovec(iobkt, SERF_READ_ALL_AVAIL, 32, tgt_vecs,
+ &vecs_used);
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, 1, vecs_used);
+ CuAssertIntEquals(tc, strlen("e1" CRLF "line2"), tgt_vecs[0].iov_len);
+ CuAssert(tc, tgt_vecs[0].iov_base,
+ strncmp("e1" CRLF "line2", tgt_vecs[0].iov_base, tgt_vecs[0].iov_len - 3) == 0);
+
+ /* Bucket should now be empty */
+ status = serf_bucket_peek(iobkt, &data, &len);
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, 0, len);
+
+ /* Test 2: Read multiple character bufs in an iovec, then read them back
+ in bursts. */
+ for (i = 0; i < 32 ; i++) {
+ vecs[i].iov_base = apr_psprintf(test_pool, "data %02d 901234567890", i);
+ vecs[i].iov_len = strlen(vecs[i].iov_base);
+ }
+
+ iobkt = serf_bucket_iovec_create(vecs, 32, alloc);
+
+    /* Check that some data is in the buffer. Don't verify the actual data;
+       the amount returned is not guaranteed to be the full buffer. */
+ status = serf_bucket_peek(iobkt, &data, &len);
+ CuAssertTrue(tc, len > 0);
+ CuAssertIntEquals(tc, APR_SUCCESS, status); /* this assumes not all data is
+ returned at once,
+ not guaranteed! */
+
+    /* Read 1 buf. 20 = length of each "data NN 901234567890" buffer. */
+ status = serf_bucket_read_iovec(iobkt, 1 * 20, 32,
+ tgt_vecs, &vecs_used);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 1, vecs_used);
+ CuAssert(tc, tgt_vecs[0].iov_base,
+ strncmp("data 00 901234567890", tgt_vecs[0].iov_base, tgt_vecs[0].iov_len) == 0);
+
+ /* Read 2 bufs. */
+ status = serf_bucket_read_iovec(iobkt, 2 * 20, 32,
+ tgt_vecs, &vecs_used);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 2, vecs_used);
+
+ /* Read the remaining 29 bufs. */
+ vecs_used = 400; /* test if iovec code correctly resets vecs_used */
+ status = serf_bucket_read_iovec(iobkt, SERF_READ_ALL_AVAIL, 32,
+ tgt_vecs, &vecs_used);
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, 29, vecs_used);
+
+ /* Test 3: use serf_bucket_read */
+ for (i = 0; i < 32 ; i++) {
+ vecs[i].iov_base = apr_psprintf(test_pool, "DATA %02d 901234567890", i);
+ vecs[i].iov_len = strlen(vecs[i].iov_base);
+ }
+
+ iobkt = serf_bucket_iovec_create(vecs, 32, alloc);
+
+ status = serf_bucket_read(iobkt, 10, &data, &len);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 10, len);
+ CuAssert(tc, data,
+ strncmp("DATA 00 90", data, len) == 0);
+
+ status = serf_bucket_read(iobkt, 10, &data, &len);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 10, len);
+ CuAssert(tc, tgt_vecs[0].iov_base,
+ strncmp("1234567890", data, len) == 0);
+
+ for (i = 1; i < 31 ; i++) {
+ const char *exp = apr_psprintf(test_pool, "DATA %02d 901234567890", i);
+ status = serf_bucket_read(iobkt, SERF_READ_ALL_AVAIL, &data, &len);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 20, len);
+ CuAssert(tc, data,
+ strncmp(exp, data, len) == 0);
+
+ }
+
+ status = serf_bucket_read(iobkt, 20, &data, &len);
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, 20, len);
+ CuAssert(tc, data,
+ strncmp("DATA 31 901234567890", data, len) == 0);
+
+    /* Test 4: read an empty iovec */
+ iobkt = serf_bucket_iovec_create(vecs, 0, alloc);
+ status = serf_bucket_read_iovec(iobkt, SERF_READ_ALL_AVAIL, 32,
+ tgt_vecs, &vecs_used);
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, 0, vecs_used);
+
+ status = serf_bucket_read(iobkt, SERF_READ_ALL_AVAIL, &data, &len);
+ CuAssertIntEquals(tc, APR_EOF, status);
+ CuAssertIntEquals(tc, 0, len);
+
+    /* Test 5: read 0 bytes from an iovec */
+ bkt = SERF_BUCKET_SIMPLE_STRING("line1" CRLF, alloc);
+ status = serf_bucket_read_iovec(bkt, SERF_READ_ALL_AVAIL, 32, vecs,
+ &vecs_used);
+ iobkt = serf_bucket_iovec_create(vecs, vecs_used, alloc);
+ status = serf_bucket_read_iovec(iobkt, 0, 32,
+ tgt_vecs, &vecs_used);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 0, vecs_used);
+
+ test_teardown(test_pool);
+}
+
+static void test_aggregate_buckets(CuTest *tc)
+{
+ apr_status_t status;
+ serf_bucket_t *bkt, *aggbkt;
+ struct iovec tgt_vecs[32];
+ int vecs_used;
+
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+
+ /* Test 1: read 0 bytes from an aggregate */
+ aggbkt = serf_bucket_aggregate_create(alloc);
+
+ bkt = SERF_BUCKET_SIMPLE_STRING("line1" CRLF, alloc);
+ serf_bucket_aggregate_append(aggbkt, bkt);
+
+ status = serf_bucket_read_iovec(aggbkt, 0, 32,
+ tgt_vecs, &vecs_used);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertIntEquals(tc, 0, vecs_used);
+
+ test_teardown(test_pool);
+}
+
+CuSuite *test_buckets(void)
+{
+ CuSuite *suite = CuSuiteNew();
+
+ SUITE_ADD_TEST(suite, test_simple_bucket_readline);
+ SUITE_ADD_TEST(suite, test_response_bucket_read);
+ SUITE_ADD_TEST(suite, test_response_bucket_headers);
+ SUITE_ADD_TEST(suite, test_response_bucket_chunked_read);
+ SUITE_ADD_TEST(suite, test_bucket_header_set);
+ SUITE_ADD_TEST(suite, test_iovec_buckets);
+ SUITE_ADD_TEST(suite, test_aggregate_buckets);
+
+ return suite;
+}
diff --git a/test/test_context.c b/test/test_context.c
new file mode 100644
index 0000000..d2a24d0
--- /dev/null
+++ b/test/test_context.c
@@ -0,0 +1,1011 @@
+/* Copyright 2002-2007 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <apr.h>
+#include <apr_pools.h>
+#include <apr_strings.h>
+#include <apr_version.h>
+
+#include "serf.h"
+
+#include "test_serf.h"
+#include "server/test_server.h"
+
+typedef struct {
+ serf_response_acceptor_t acceptor;
+ void *acceptor_baton;
+
+ serf_response_handler_t handler;
+
+ apr_array_header_t *sent_requests;
+ apr_array_header_t *accepted_requests;
+ apr_array_header_t *handled_requests;
+ int req_id;
+
+ const char *method;
+ const char *path;
+ int done;
+
+ test_baton_t *tb;
+} handler_baton_t;
+
+static serf_bucket_t* accept_response(serf_request_t *request,
+ serf_bucket_t *stream,
+ void *acceptor_baton,
+ apr_pool_t *pool)
+{
+ serf_bucket_t *c;
+ serf_bucket_alloc_t *bkt_alloc;
+ handler_baton_t *ctx = acceptor_baton;
+
+ /* get the per-request bucket allocator */
+ bkt_alloc = serf_request_get_alloc(request);
+
+ /* Create a barrier so the response doesn't eat us! */
+ c = serf_bucket_barrier_create(stream, bkt_alloc);
+
+ APR_ARRAY_PUSH(ctx->accepted_requests, int) = ctx->req_id;
+
+ return serf_bucket_response_create(c, bkt_alloc);
+}
+
+static apr_status_t setup_request(serf_request_t *request,
+ void *setup_baton,
+ serf_bucket_t **req_bkt,
+ serf_response_acceptor_t *acceptor,
+ void **acceptor_baton,
+ serf_response_handler_t *handler,
+ void **handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = setup_baton;
+ serf_bucket_t *body_bkt;
+
+ /* create a simple body text */
+ const char *str = apr_psprintf(pool, "%d", ctx->req_id);
+ body_bkt = serf_bucket_simple_create(str, strlen(str), NULL, NULL,
+ serf_request_get_alloc(request));
+ *req_bkt =
+ serf_request_bucket_request_create(request,
+ ctx->method, ctx->path,
+ body_bkt,
+ serf_request_get_alloc(request));
+
+ APR_ARRAY_PUSH(ctx->sent_requests, int) = ctx->req_id;
+
+ *acceptor = ctx->acceptor;
+ *acceptor_baton = ctx;
+ *handler = ctx->handler;
+ *handler_baton = ctx;
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t handle_response(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = handler_baton;
+
+ if (! response) {
+ serf_connection_request_create(ctx->tb->connection,
+ setup_request,
+ ctx);
+ return APR_SUCCESS;
+ }
+
+ while (1) {
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ status = serf_bucket_read(response, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status))
+ return status;
+
+ if (APR_STATUS_IS_EOF(status)) {
+ APR_ARRAY_PUSH(ctx->handled_requests, int) = ctx->req_id;
+ ctx->done = TRUE;
+ return APR_EOF;
+ }
+
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ return status;
+ }
+
+ }
+
+ return APR_SUCCESS;
+}
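+
+/* Together, setup_request(), accept_response() and handle_response() form the
+   request life cycle used by the tests below: setup_request builds the
+   outgoing request bucket and records the request id in sent_requests,
+   accept_response wraps the incoming stream in a response bucket (behind a
+   barrier bucket) and records the id in accepted_requests, and
+   handle_response drains the response until EOF and records the id in
+   handled_requests. */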
+
+/* Validate that requests are sent and completed in the order of creation. */
+static void test_serf_connection_request_create(CuTest *tc)
+{
+ test_baton_t *tb;
+ serf_request_t *request1, *request2;
+ handler_baton_t handler_ctx, handler2_ctx;
+ apr_status_t status;
+ apr_pool_t *iter_pool;
+ apr_array_header_t *accepted_requests, *handled_requests, *sent_requests;
+ int i;
+ test_server_message_t message_list[] = {
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "2")},
+ };
+
+ test_server_action_t action_list[] = {
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ };
+ apr_pool_t *test_pool = test_setup();
+
+ accepted_requests = apr_array_make(test_pool, 2, sizeof(int));
+ sent_requests = apr_array_make(test_pool, 2, sizeof(int));
+ handled_requests = apr_array_make(test_pool, 2, sizeof(int));
+
+ /* Set up a test context with a server */
+ status = test_server_setup(&tb,
+ message_list, 2,
+ action_list, 2, 0, NULL,
+ test_pool);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ handler_ctx.method = "GET";
+ handler_ctx.path = "/";
+ handler_ctx.done = FALSE;
+
+ handler_ctx.acceptor = accept_response;
+ handler_ctx.acceptor_baton = NULL;
+ handler_ctx.handler = handle_response;
+ handler_ctx.req_id = 1;
+ handler_ctx.accepted_requests = accepted_requests;
+ handler_ctx.sent_requests = sent_requests;
+ handler_ctx.handled_requests = handled_requests;
+
+ request1 = serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler_ctx);
+
+ handler2_ctx = handler_ctx;
+ handler2_ctx.req_id = 2;
+
+ request2 = serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler2_ctx);
+
+ apr_pool_create(&iter_pool, test_pool);
+
+ while (!handler_ctx.done || !handler2_ctx.done)
+ {
+ apr_pool_clear(iter_pool);
+
+ status = test_server_run(tb->serv_ctx, 0, iter_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = serf_context_run(tb->context, 0, iter_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ }
+ apr_pool_destroy(iter_pool);
+
+ /* Check that all requests were received */
+ CuAssertIntEquals(tc, 2, sent_requests->nelts);
+ CuAssertIntEquals(tc, 2, accepted_requests->nelts);
+ CuAssertIntEquals(tc, 2, handled_requests->nelts);
+
+ /* Check that the requests were sent in the order we created them */
+ for (i = 0; i < sent_requests->nelts; i++) {
+ int req_nr = APR_ARRAY_IDX(sent_requests, i, int);
+ CuAssertIntEquals(tc, i + 1, req_nr);
+ }
+
+ /* Check that the requests were received in the order we created them */
+ for (i = 0; i < handled_requests->nelts; i++) {
+ int req_nr = APR_ARRAY_IDX(handled_requests, i, int);
+ CuAssertIntEquals(tc, i + 1, req_nr);
+ }
+
+ test_server_teardown(tb, test_pool);
+ test_teardown(test_pool);
+}
+
+/* Validate that priority requests are sent and completed before normal
+ requests. */
+static void test_serf_connection_priority_request_create(CuTest *tc)
+{
+ test_baton_t *tb;
+ serf_request_t *request1, *request2, *request3;
+ handler_baton_t handler_ctx, handler2_ctx, handler3_ctx;
+ apr_status_t status;
+ apr_pool_t *iter_pool;
+ apr_array_header_t *accepted_requests, *handled_requests, *sent_requests;
+ int i;
+
+ test_server_message_t message_list[] = {
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "2")},
+ {CHUNKED_REQUEST(1, "3")},
+ };
+
+ test_server_action_t action_list[] = {
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ };
+
+ apr_pool_t *test_pool = test_setup();
+
+ accepted_requests = apr_array_make(test_pool, 3, sizeof(int));
+ sent_requests = apr_array_make(test_pool, 3, sizeof(int));
+ handled_requests = apr_array_make(test_pool, 3, sizeof(int));
+
+ /* Set up a test context with a server */
+ status = test_server_setup(&tb,
+ message_list, 3,
+ action_list, 3, 0, NULL,
+ test_pool);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ handler_ctx.method = "GET";
+ handler_ctx.path = "/";
+ handler_ctx.done = FALSE;
+
+ handler_ctx.acceptor = accept_response;
+ handler_ctx.acceptor_baton = NULL;
+ handler_ctx.handler = handle_response;
+ handler_ctx.req_id = 2;
+ handler_ctx.accepted_requests = accepted_requests;
+ handler_ctx.sent_requests = sent_requests;
+ handler_ctx.handled_requests = handled_requests;
+
+ request1 = serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler_ctx);
+
+ handler2_ctx = handler_ctx;
+ handler2_ctx.req_id = 3;
+
+ request2 = serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler2_ctx);
+ handler3_ctx = handler_ctx;
+ handler3_ctx.req_id = 1;
+
+ request3 = serf_connection_priority_request_create(tb->connection,
+ setup_request,
+ &handler3_ctx);
+
+ apr_pool_create(&iter_pool, test_pool);
+
+ while (!handler_ctx.done || !handler2_ctx.done || !handler3_ctx.done)
+ {
+ apr_pool_clear(iter_pool);
+
+ status = test_server_run(tb->serv_ctx, 0, iter_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = serf_context_run(tb->context, 0, iter_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(tb->bkt_alloc);
+ }
+ apr_pool_destroy(iter_pool);
+
+ /* Check that all requests were received */
+ CuAssertIntEquals(tc, 3, sent_requests->nelts);
+ CuAssertIntEquals(tc, 3, accepted_requests->nelts);
+ CuAssertIntEquals(tc, 3, handled_requests->nelts);
+
+ /* Check that the requests were sent in the order we created them */
+ for (i = 0; i < sent_requests->nelts; i++) {
+ int req_nr = APR_ARRAY_IDX(sent_requests, i, int);
+ CuAssertIntEquals(tc, i + 1, req_nr);
+ }
+
+ /* Check that the requests were received in the order we created them */
+ for (i = 0; i < handled_requests->nelts; i++) {
+ int req_nr = APR_ARRAY_IDX(handled_requests, i, int);
+ CuAssertIntEquals(tc, i + 1, req_nr);
+ }
+
+ test_server_teardown(tb, test_pool);
+ test_teardown(test_pool);
+}
+
+/* Test that serf correctly handles the 'Connection: close' header when the
+   server is planning to close the connection. */
+#define NUM_REQUESTS 10
+static void test_serf_closed_connection(CuTest *tc)
+{
+ test_baton_t *tb;
+ apr_array_header_t *accepted_requests, *handled_requests, *sent_requests;
+ apr_status_t status;
+ handler_baton_t handler_ctx[NUM_REQUESTS];
+ int done = FALSE, i;
+
+ test_server_message_t message_list[] = {
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "2")},
+ {CHUNKED_REQUEST(1, "3")},
+ {CHUNKED_REQUEST(1, "4")},
+ {CHUNKED_REQUEST(1, "5")},
+ {CHUNKED_REQUEST(1, "6")},
+ {CHUNKED_REQUEST(1, "7")},
+ {CHUNKED_REQUEST(1, "8")},
+ {CHUNKED_REQUEST(1, "9")},
+ {CHUNKED_REQUEST(2, "10")}
+ };
+
+ test_server_action_t action_list[] = {
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND,
+ "HTTP/1.1 200 OK" CRLF
+ "Transfer-Encoding: chunked" CRLF
+ "Connection: close" CRLF
+ CRLF
+ "0" CRLF
+ CRLF
+ },
+ {SERVER_IGNORE_AND_KILL_CONNECTION},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND,
+ "HTTP/1.1 200 OK" CRLF
+ "Transfer-Encoding: chunked" CRLF
+ "Connection: close" CRLF
+ CRLF
+ "0" CRLF
+ CRLF
+ },
+ {SERVER_IGNORE_AND_KILL_CONNECTION},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ };
+
+ apr_pool_t *test_pool = test_setup();
+
+ accepted_requests = apr_array_make(test_pool, NUM_REQUESTS, sizeof(int));
+ sent_requests = apr_array_make(test_pool, NUM_REQUESTS, sizeof(int));
+ handled_requests = apr_array_make(test_pool, NUM_REQUESTS, sizeof(int));
+
+ /* Set up a test context with a server. */
+ status = test_server_setup(&tb,
+ message_list, 10,
+ action_list, 12,
+ 0,
+ NULL,
+ test_pool);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ for (i = 0 ; i < NUM_REQUESTS ; i++) {
+ /* Send some requests on the connections */
+ handler_ctx[i].method = "GET";
+ handler_ctx[i].path = "/";
+ handler_ctx[i].done = FALSE;
+
+ handler_ctx[i].acceptor = accept_response;
+ handler_ctx[i].acceptor_baton = NULL;
+ handler_ctx[i].handler = handle_response;
+ handler_ctx[i].req_id = i+1;
+ handler_ctx[i].accepted_requests = accepted_requests;
+ handler_ctx[i].sent_requests = sent_requests;
+ handler_ctx[i].handled_requests = handled_requests;
+ handler_ctx[i].tb = tb;
+
+ serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler_ctx[i]);
+ }
+
+ while (1) {
+ status = test_server_run(tb->serv_ctx, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = serf_context_run(tb->context, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(tb->bkt_alloc);
+
+ done = TRUE;
+ for (i = 0 ; i < NUM_REQUESTS ; i++)
+ if (handler_ctx[i].done == FALSE) {
+ done = FALSE;
+ break;
+ }
+ if (done)
+ break;
+ }
+
+ /* Check that all requests were received */
+ CuAssertTrue(tc, sent_requests->nelts >= NUM_REQUESTS);
+ CuAssertIntEquals(tc, NUM_REQUESTS, accepted_requests->nelts);
+ CuAssertIntEquals(tc, NUM_REQUESTS, handled_requests->nelts);
+
+ /* Cleanup */
+ test_server_teardown(tb, test_pool);
+ test_teardown(test_pool);
+}
+#undef NUM_REQUESTS
+
+/* Test if serf is sending the request to the proxy, not to the server
+ directly. */
+static void test_serf_setup_proxy(CuTest *tc)
+{
+ test_baton_t *tb;
+ serf_request_t *request;
+ handler_baton_t handler_ctx;
+ apr_status_t status;
+ apr_pool_t *iter_pool;
+ apr_array_header_t *accepted_requests, *handled_requests, *sent_requests;
+ int i;
+ int numrequests = 1;
+
+ test_server_message_t message_list[] = {
+ {"GET http://localhost:" SERV_PORT_STR " HTTP/1.1" CRLF\
+ "Host: localhost:" SERV_PORT_STR CRLF\
+ "Transfer-Encoding: chunked" CRLF\
+ CRLF\
+ "1" CRLF\
+ "1" CRLF\
+ "0" CRLF\
+ CRLF}
+ };
+
+ test_server_action_t action_list_proxy[] = {
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ };
+
+ apr_pool_t *test_pool = test_setup();
+
+ accepted_requests = apr_array_make(test_pool, numrequests, sizeof(int));
+ sent_requests = apr_array_make(test_pool, numrequests, sizeof(int));
+ handled_requests = apr_array_make(test_pool, numrequests, sizeof(int));
+
+    /* Set up a test context with a server and a proxy; the server itself
+       expects no messages. */
+ status = test_server_proxy_setup(&tb,
+ /* server messages and actions */
+ NULL, 0,
+ NULL, 0,
+                                     /* proxy messages and actions */
+ message_list, 1,
+ action_list_proxy, 1,
+ 0,
+ NULL, test_pool);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ handler_ctx.method = "GET";
+ handler_ctx.path = "/";
+ handler_ctx.done = FALSE;
+
+ handler_ctx.acceptor = accept_response;
+ handler_ctx.acceptor_baton = NULL;
+ handler_ctx.handler = handle_response;
+ handler_ctx.req_id = 1;
+ handler_ctx.accepted_requests = accepted_requests;
+ handler_ctx.sent_requests = sent_requests;
+ handler_ctx.handled_requests = handled_requests;
+
+ request = serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler_ctx);
+
+ apr_pool_create(&iter_pool, test_pool);
+
+ while (!handler_ctx.done)
+ {
+ apr_pool_clear(iter_pool);
+
+ status = test_server_run(tb->serv_ctx, 0, iter_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = test_server_run(tb->proxy_ctx, 0, iter_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = serf_context_run(tb->context, 0, iter_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(tb->bkt_alloc);
+ }
+ apr_pool_destroy(iter_pool);
+
+ /* Check that all requests were received */
+ CuAssertIntEquals(tc, numrequests, sent_requests->nelts);
+ CuAssertIntEquals(tc, numrequests, accepted_requests->nelts);
+ CuAssertIntEquals(tc, numrequests, handled_requests->nelts);
+
+ /* Check that the requests were sent in the order we created them */
+ for (i = 0; i < sent_requests->nelts; i++) {
+ int req_nr = APR_ARRAY_IDX(sent_requests, i, int);
+ CuAssertIntEquals(tc, i + 1, req_nr);
+ }
+
+ /* Check that the requests were received in the order we created them */
+ for (i = 0; i < handled_requests->nelts; i++) {
+ int req_nr = APR_ARRAY_IDX(handled_requests, i, int);
+ CuAssertIntEquals(tc, i + 1, req_nr);
+ }
+
+ test_server_teardown(tb, test_pool);
+ test_teardown(test_pool);
+}
+
+/*****************************************************************************
+ * Test if we can make serf send requests one by one.
+ *****************************************************************************/
+
+/* Resend the first request 2 times by reducing the pipeline bandwidth to
+   one request at a time, and by adding the first request again at the start
+   of the outgoing queue. */
+static apr_status_t
+handle_response_keepalive_limit(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = handler_baton;
+
+ if (! response) {
+ return APR_SUCCESS;
+ }
+
+ while (1) {
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ status = serf_bucket_read(response, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ if (APR_STATUS_IS_EOF(status)) {
+ APR_ARRAY_PUSH(ctx->handled_requests, int) = ctx->req_id;
+ ctx->done = TRUE;
+ if (ctx->req_id == 1 && ctx->handled_requests->nelts < 3) {
+ serf_connection_priority_request_create(ctx->tb->connection,
+ setup_request,
+ ctx);
+ ctx->done = FALSE;
+ }
+ return APR_EOF;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+#define SEND_REQUESTS 5
+#define RCVD_REQUESTS 7
+static void test_keepalive_limit_one_by_one(CuTest *tc)
+{
+ test_baton_t *tb;
+ apr_array_header_t *accepted_requests, *handled_requests, *sent_requests;
+ apr_status_t status;
+ handler_baton_t handler_ctx[SEND_REQUESTS];
+ int done = FALSE, i;
+
+ test_server_message_t message_list[] = {
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "2")},
+ {CHUNKED_REQUEST(1, "3")},
+ {CHUNKED_REQUEST(1, "4")},
+ {CHUNKED_REQUEST(1, "5")},
+ };
+
+ test_server_action_t action_list[] = {
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ };
+
+ apr_pool_t *test_pool = test_setup();
+
+ accepted_requests = apr_array_make(test_pool, RCVD_REQUESTS, sizeof(int));
+ sent_requests = apr_array_make(test_pool, RCVD_REQUESTS, sizeof(int));
+ handled_requests = apr_array_make(test_pool, RCVD_REQUESTS, sizeof(int));
+
+ /* Set up a test context with a server. */
+ status = test_server_setup(&tb,
+ message_list, 7,
+ action_list, 7, 0, NULL,
+ test_pool);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ for (i = 0 ; i < SEND_REQUESTS ; i++) {
+ /* Send some requests on the connections */
+ handler_ctx[i].method = "GET";
+ handler_ctx[i].path = "/";
+ handler_ctx[i].done = FALSE;
+
+ handler_ctx[i].acceptor = accept_response;
+ handler_ctx[i].acceptor_baton = NULL;
+ handler_ctx[i].handler = handle_response_keepalive_limit;
+ handler_ctx[i].req_id = i+1;
+ handler_ctx[i].accepted_requests = accepted_requests;
+ handler_ctx[i].sent_requests = sent_requests;
+ handler_ctx[i].handled_requests = handled_requests;
+ handler_ctx[i].tb = tb;
+
+ serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler_ctx[i]);
+ serf_connection_set_max_outstanding_requests(tb->connection, 1);
+ }
+
+ while (1) {
+ status = test_server_run(tb->serv_ctx, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = serf_context_run(tb->context, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(tb->bkt_alloc);
+
+ done = TRUE;
+ for (i = 0 ; i < SEND_REQUESTS ; i++)
+ if (handler_ctx[i].done == FALSE) {
+ done = FALSE;
+ break;
+ }
+ if (done)
+ break;
+ }
+
+ /* Check that all requests were received */
+ CuAssertIntEquals(tc, RCVD_REQUESTS, sent_requests->nelts);
+ CuAssertIntEquals(tc, RCVD_REQUESTS, accepted_requests->nelts);
+ CuAssertIntEquals(tc, RCVD_REQUESTS, handled_requests->nelts);
+
+ /* Cleanup */
+ test_server_teardown(tb, test_pool);
+ test_teardown(test_pool);
+}
+#undef SEND_REQUESTS
+#undef RCVD_REQUESTS
+
+/*****************************************************************************
+ * Test if we can make serf first send requests one by one, and then change
+ * back to burst mode.
+ *****************************************************************************/
+#define SEND_REQUESTS 5
+#define RCVD_REQUESTS 7
+/* Resend the first request 2 times by reducing the pipeline bandwidth to
+ one request at a time, and by adding the first request again at the start of
+ the outgoing queue. */
+static apr_status_t
+handle_response_keepalive_limit_burst(serf_request_t *request,
+ serf_bucket_t *response,
+ void *handler_baton,
+ apr_pool_t *pool)
+{
+ handler_baton_t *ctx = handler_baton;
+
+ if (! response) {
+ return APR_SUCCESS;
+ }
+
+ while (1) {
+ apr_status_t status;
+ const char *data;
+ apr_size_t len;
+
+ status = serf_bucket_read(response, 2048, &data, &len);
+ if (SERF_BUCKET_READ_ERROR(status)) {
+ return status;
+ }
+
+ if (APR_STATUS_IS_EOF(status)) {
+ APR_ARRAY_PUSH(ctx->handled_requests, int) = ctx->req_id;
+ ctx->done = TRUE;
+ if (ctx->req_id == 1 && ctx->handled_requests->nelts < 3) {
+ serf_connection_priority_request_create(ctx->tb->connection,
+ setup_request,
+ ctx);
+ ctx->done = FALSE;
+ }
+ else {
+ /* No more one-by-one. */
+ serf_connection_set_max_outstanding_requests(ctx->tb->connection,
+ 0);
+ }
+ return APR_EOF;
+ }
+
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ return status;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+static void test_keepalive_limit_one_by_one_and_burst(CuTest *tc)
+{
+ test_baton_t *tb;
+ apr_array_header_t *accepted_requests, *handled_requests, *sent_requests;
+ apr_status_t status;
+ handler_baton_t handler_ctx[SEND_REQUESTS];
+ int done = FALSE, i;
+
+ test_server_message_t message_list[] = {
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "2")},
+ {CHUNKED_REQUEST(1, "3")},
+ {CHUNKED_REQUEST(1, "4")},
+ {CHUNKED_REQUEST(1, "5")},
+ };
+
+ test_server_action_t action_list[] = {
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ };
+
+ apr_pool_t *test_pool = test_setup();
+
+ accepted_requests = apr_array_make(test_pool, RCVD_REQUESTS, sizeof(int));
+ sent_requests = apr_array_make(test_pool, RCVD_REQUESTS, sizeof(int));
+ handled_requests = apr_array_make(test_pool, RCVD_REQUESTS, sizeof(int));
+
+ /* Set up a test context with a server. */
+ status = test_server_setup(&tb,
+ message_list, 7,
+ action_list, 7, 0, NULL,
+ test_pool);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ for (i = 0 ; i < SEND_REQUESTS ; i++) {
+ /* Send some requests on the connections */
+ handler_ctx[i].method = "GET";
+ handler_ctx[i].path = "/";
+ handler_ctx[i].done = FALSE;
+
+ handler_ctx[i].acceptor = accept_response;
+ handler_ctx[i].acceptor_baton = NULL;
+ handler_ctx[i].handler = handle_response_keepalive_limit_burst;
+ handler_ctx[i].req_id = i+1;
+ handler_ctx[i].accepted_requests = accepted_requests;
+ handler_ctx[i].sent_requests = sent_requests;
+ handler_ctx[i].handled_requests = handled_requests;
+ handler_ctx[i].tb = tb;
+
+ serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler_ctx[i]);
+ serf_connection_set_max_outstanding_requests(tb->connection, 1);
+ }
+
+ while (1) {
+ status = test_server_run(tb->serv_ctx, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = serf_context_run(tb->context, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(tb->bkt_alloc);
+
+ done = TRUE;
+ for (i = 0 ; i < SEND_REQUESTS ; i++)
+ if (handler_ctx[i].done == FALSE) {
+ done = FALSE;
+ break;
+ }
+ if (done)
+ break;
+ }
+
+ /* Check that all requests were received */
+ CuAssertIntEquals(tc, RCVD_REQUESTS, sent_requests->nelts);
+ CuAssertIntEquals(tc, RCVD_REQUESTS, accepted_requests->nelts);
+ CuAssertIntEquals(tc, RCVD_REQUESTS, handled_requests->nelts);
+
+ /* Cleanup */
+ test_server_teardown(tb, test_pool);
+ test_teardown(test_pool);
+}
+#undef SEND_REQUESTS
+#undef RCVD_REQUESTS
+
+#define NUM_REQUESTS 5
+typedef struct {
+ apr_off_t read;
+ apr_off_t written;
+} progress_baton_t;
+
+static void
+progress_cb(void *progress_baton, apr_off_t read, apr_off_t written)
+{
+ test_baton_t *tb = progress_baton;
+ progress_baton_t *pb = tb->user_baton;
+
+ pb->read = read;
+ pb->written = written;
+}
+
+static apr_status_t progress_conn_setup(apr_socket_t *skt,
+ serf_bucket_t **input_bkt,
+ serf_bucket_t **output_bkt,
+ void *setup_baton,
+ apr_pool_t *pool)
+{
+ test_baton_t *tb = setup_baton;
+ *input_bkt = serf_context_bucket_socket_create(tb->context, skt, tb->bkt_alloc);
+ return APR_SUCCESS;
+}
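+
+/* progress_conn_setup replaces only the input bucket: creating it with
+   serf_context_bucket_socket_create() makes the bytes read from the socket
+   count towards the context's progress reporting, which
+   test_serf_progress_callback below checks via progress_cb(). */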
+
+static void test_serf_progress_callback(CuTest *tc)
+{
+ test_baton_t *tb;
+ apr_array_header_t *accepted_requests, *handled_requests, *sent_requests;
+ apr_status_t status;
+ handler_baton_t handler_ctx[NUM_REQUESTS];
+ int done = FALSE, i;
+ progress_baton_t *pb;
+
+ test_server_message_t message_list[] = {
+ {CHUNKED_REQUEST(1, "1")},
+ {CHUNKED_REQUEST(1, "2")},
+ {CHUNKED_REQUEST(1, "3")},
+ {CHUNKED_REQUEST(1, "4")},
+ {CHUNKED_REQUEST(1, "5")},
+ };
+
+ test_server_action_t action_list[] = {
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_RESPONSE(1, "2")},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ {SERVER_RESPOND, CHUNKED_EMPTY_RESPONSE},
+ };
+
+ apr_pool_t *test_pool = test_setup();
+
+ accepted_requests = apr_array_make(test_pool, NUM_REQUESTS, sizeof(int));
+ sent_requests = apr_array_make(test_pool, NUM_REQUESTS, sizeof(int));
+ handled_requests = apr_array_make(test_pool, NUM_REQUESTS, sizeof(int));
+
+ /* Set up a test context with a server. */
+ status = test_server_setup(&tb,
+ message_list, 5,
+ action_list, 5, 0,
+ progress_conn_setup, test_pool);
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ /* Set up the progress callback. */
+ pb = apr_pcalloc(test_pool, sizeof(*pb));
+ tb->user_baton = pb;
+ serf_context_set_progress_cb(tb->context, progress_cb, tb);
+
+ for (i = 0 ; i < NUM_REQUESTS ; i++) {
+        /* Send some requests on the connection */
+ handler_ctx[i].method = "GET";
+ handler_ctx[i].path = "/";
+ handler_ctx[i].done = FALSE;
+
+ handler_ctx[i].acceptor = accept_response;
+ handler_ctx[i].acceptor_baton = NULL;
+ handler_ctx[i].handler = handle_response;
+ handler_ctx[i].req_id = i+1;
+ handler_ctx[i].accepted_requests = accepted_requests;
+ handler_ctx[i].sent_requests = sent_requests;
+ handler_ctx[i].handled_requests = handled_requests;
+ handler_ctx[i].tb = tb;
+
+ serf_connection_request_create(tb->connection,
+ setup_request,
+ &handler_ctx[i]);
+ }
+
+ while (1) {
+ status = test_server_run(tb->serv_ctx, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ status = serf_context_run(tb->context, 0, test_pool);
+ if (APR_STATUS_IS_TIMEUP(status))
+ status = APR_SUCCESS;
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+ /* Debugging purposes only! */
+ serf_debug__closed_conn(tb->bkt_alloc);
+
+ done = TRUE;
+ for (i = 0 ; i < NUM_REQUESTS ; i++)
+ if (handler_ctx[i].done == FALSE) {
+ done = FALSE;
+ break;
+ }
+ if (done)
+ break;
+ }
+
+    /* Check that all requests were sent and all responses accepted and handled */
+ CuAssertTrue(tc, sent_requests->nelts >= NUM_REQUESTS);
+ CuAssertIntEquals(tc, NUM_REQUESTS, accepted_requests->nelts);
+ CuAssertIntEquals(tc, NUM_REQUESTS, handled_requests->nelts);
+
+ /* Check that progress was reported. */
+ CuAssertTrue(tc, pb->written > 0);
+ CuAssertTrue(tc, pb->read > 0);
+
+ /* Cleanup */
+ test_server_teardown(tb, test_pool);
+ test_teardown(test_pool);
+}
+#undef NUM_REQUESTS
+
+CuSuite *test_context(void)
+{
+ CuSuite *suite = CuSuiteNew();
+
+ SUITE_ADD_TEST(suite, test_serf_connection_request_create);
+ SUITE_ADD_TEST(suite, test_serf_connection_priority_request_create);
+ SUITE_ADD_TEST(suite, test_serf_closed_connection);
+ SUITE_ADD_TEST(suite, test_serf_setup_proxy);
+ SUITE_ADD_TEST(suite, test_keepalive_limit_one_by_one);
+ SUITE_ADD_TEST(suite, test_keepalive_limit_one_by_one_and_burst);
+ SUITE_ADD_TEST(suite, test_serf_progress_callback);
+
+ return suite;
+}
diff --git a/test/test_serf.h b/test/test_serf.h
new file mode 100644
index 0000000..454dfcc
--- /dev/null
+++ b/test/test_serf.h
@@ -0,0 +1,126 @@
+/* Copyright 2002-2007 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TEST_SERF_H
+#define TEST_SERF_H
+
+#include "CuTest.h"
+
+#include <apr.h>
+#include <apr_pools.h>
+#include <apr_uri.h>
+
+#include "serf.h"
+#include "server/test_server.h"
+
+/** These macros are provided by APR itself as of version 1.3.
+ * They are defined here as well so that older versions of APR can be used.
+ */
+
+/** index into an apr_array_header_t */
+#ifndef APR_ARRAY_IDX
+#define APR_ARRAY_IDX(ary,i,type) (((type *)(ary)->elts)[i])
+#endif
+
+/** easier array-pushing syntax */
+#ifndef APR_ARRAY_PUSH
+#define APR_ARRAY_PUSH(ary,type) (*((type *)apr_array_push(ary)))
+#endif
+
+/* CuTest declarations */
+CuSuite *getsuite(void);
+
+CuSuite *test_context(void);
+CuSuite *test_buckets(void);
+CuSuite *test_ssl(void);
+
+/* Test setup declarations */
+
+#define CRLF "\r\n"
+
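+/* Request/response templates for the test server.  The chunk length is
+ * stringified with the # operator, so e.g. CHUNKED_REQUEST(1, "1") builds a
+ * request whose body is "1" CRLF "1" CRLF followed by the terminating
+ * "0" CRLF CRLF. */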
+#define CHUNKED_REQUEST(len, body)\
+ "GET / HTTP/1.1" CRLF\
+ "Host: localhost:12345" CRLF\
+ "Transfer-Encoding: chunked" CRLF\
+ CRLF\
+ #len CRLF\
+ body CRLF\
+ "0" CRLF\
+ CRLF
+
+#define CHUNKED_RESPONSE(len, body)\
+ "HTTP/1.1 200 OK" CRLF\
+ "Transfer-Encoding: chunked" CRLF\
+ CRLF\
+ #len CRLF\
+ body CRLF\
+ "0" CRLF\
+ CRLF
+
+#define CHUNKED_EMPTY_RESPONSE\
+ "HTTP/1.1 200 OK" CRLF\
+ "Transfer-Encoding: chunked" CRLF\
+ CRLF\
+ "0" CRLF\
+ CRLF
+
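+/* Baton shared by the tests: the client-side serf objects plus the contexts
+ * and addresses of the in-process test server and optional proxy. */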
+typedef struct {
+ /* Pool for resource allocation. */
+ apr_pool_t *pool;
+
+ serf_context_t *context;
+ serf_connection_t *connection;
+ serf_bucket_alloc_t *bkt_alloc;
+
+ serv_ctx_t *serv_ctx;
+ apr_sockaddr_t *serv_addr;
+
+ serv_ctx_t *proxy_ctx;
+ apr_sockaddr_t *proxy_addr;
+
+ /* An extra baton which can be freely used by tests. */
+ void *user_baton;
+
+} test_baton_t;
+
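+/* Create a test baton with a client connection and start an in-process test
+ * server primed with the messages it should expect and the actions it should
+ * perform in reply. */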
+apr_status_t test_server_setup(test_baton_t **tb_p,
+ test_server_message_t *message_list,
+ apr_size_t message_count,
+ test_server_action_t *action_list,
+ apr_size_t action_count,
+ apr_int32_t options,
+ serf_connection_setup_t conn_setup,
+ apr_pool_t *pool);
+
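+/* As test_server_setup(), but also start a proxy server with its own
+ * expected messages and actions; the client connection is configured to go
+ * through the proxy. */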
+apr_status_t test_server_proxy_setup(
+ test_baton_t **tb_p,
+ test_server_message_t *serv_message_list,
+ apr_size_t serv_message_count,
+ test_server_action_t *serv_action_list,
+ apr_size_t serv_action_count,
+ test_server_message_t *proxy_message_list,
+ apr_size_t proxy_message_count,
+ test_server_action_t *proxy_action_list,
+ apr_size_t proxy_action_count,
+ apr_int32_t options,
+ serf_connection_setup_t conn_setup,
+ apr_pool_t *pool);
+
+apr_status_t test_server_teardown(test_baton_t *tb, apr_pool_t *pool);
+
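+/* Create/destroy the root pool used by a single test. */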
+apr_pool_t *test_setup(void);
+void test_teardown(apr_pool_t *test_pool);
+
+#endif /* TEST_SERF_H */
diff --git a/test/test_ssl.c b/test/test_ssl.c
new file mode 100644
index 0000000..81b939d
--- /dev/null
+++ b/test/test_ssl.c
@@ -0,0 +1,112 @@
+/* Copyright 2008 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <apr.h>
+#include <apr_pools.h>
+#include <apr_strings.h>
+#include <apr_env.h>
+
+#include "serf.h"
+#include "serf_bucket_types.h"
+
+#include "test_serf.h"
+
+#if defined(WIN32) && defined(_DEBUG)
+/* Include this file to allow running a Debug build of serf with a Release
+ build of OpenSSL. */
+#include <openssl/applink.c>
+#endif
+
+/* Test setting up the openssl library. */
+static void test_ssl_init(CuTest *tc)
+{
+ serf_bucket_t *bkt, *stream;
+ serf_ssl_context_t *ssl_context;
+ apr_status_t status;
+
+ apr_pool_t *test_pool = test_setup();
+ serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
+ NULL);
+
+ stream = SERF_BUCKET_SIMPLE_STRING("", alloc);
+
+ bkt = serf_bucket_ssl_decrypt_create(stream, NULL,
+ alloc);
+ ssl_context = serf_bucket_ssl_decrypt_context_get(bkt);
+
+ bkt = serf_bucket_ssl_encrypt_create(stream, ssl_context,
+ alloc);
+
+ status = serf_ssl_use_default_certificates(ssl_context);
+
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ test_teardown(test_pool);
+}
+
+/* Test that loading a custom CA certificate file works. */
+static void test_ssl_load_cert_file(CuTest *tc)
+{
+ serf_ssl_certificate_t *cert = NULL;
+
+ apr_pool_t *test_pool = test_setup();
+ apr_status_t status = serf_ssl_load_cert_file(&cert, "test/serftestca.pem",
+ test_pool);
+
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertPtrNotNull(tc, cert);
+ test_teardown(test_pool);
+}
+
+/* Test that reading a custom CA certificate file works. */
+static void test_ssl_cert_subject(CuTest *tc)
+{
+ apr_hash_t *subject;
+ serf_ssl_certificate_t *cert = NULL;
+ apr_status_t status;
+
+ apr_pool_t *test_pool = test_setup();
+
+ status = serf_ssl_load_cert_file(&cert, "test/serftestca.pem", test_pool);
+
+ CuAssertIntEquals(tc, APR_SUCCESS, status);
+ CuAssertPtrNotNull(tc, cert);
+
+ subject = serf_ssl_cert_subject(cert, test_pool);
+ CuAssertStrEquals(tc, "Test Suite",
+ apr_hash_get(subject, "OU", APR_HASH_KEY_STRING));
+ CuAssertStrEquals(tc, "In Serf we trust, Inc.",
+ apr_hash_get(subject, "O", APR_HASH_KEY_STRING));
+ CuAssertStrEquals(tc, "Mechelen",
+ apr_hash_get(subject, "L", APR_HASH_KEY_STRING));
+ CuAssertStrEquals(tc, "Antwerp",
+ apr_hash_get(subject, "ST", APR_HASH_KEY_STRING));
+ CuAssertStrEquals(tc, "BE",
+ apr_hash_get(subject, "C", APR_HASH_KEY_STRING));
+ CuAssertStrEquals(tc, "serf@example.com",
+ apr_hash_get(subject, "E", APR_HASH_KEY_STRING));
+
+ test_teardown(test_pool);
+}
+
+CuSuite *test_ssl(void)
+{
+ CuSuite *suite = CuSuiteNew();
+
+ SUITE_ADD_TEST(suite, test_ssl_init);
+ SUITE_ADD_TEST(suite, test_ssl_load_cert_file);
+ SUITE_ADD_TEST(suite, test_ssl_cert_subject);
+
+ return suite;
+}
diff --git a/test/test_util.c b/test/test_util.c
new file mode 100644
index 0000000..9496195
--- /dev/null
+++ b/test/test_util.c
@@ -0,0 +1,214 @@
+/* Copyright 2002-2007 Justin Erenkrantz and Greg Stein
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr.h"
+#include "apr_pools.h"
+#include <stdlib.h>
+
+#include "serf.h"
+
+#include "test_serf.h"
+#include "server/test_server.h"
+
+
+/*****************************************************************************/
+/* Server setup function(s)
+ */
+
+#define SERV_URL "http://localhost:" SERV_PORT_STR
+
+static apr_status_t default_server_address(apr_sockaddr_t **address,
+ apr_pool_t *pool)
+{
+ return apr_sockaddr_info_get(address,
+ "localhost", APR_UNSPEC, SERV_PORT, 0,
+ pool);
+}
+
+static apr_status_t default_proxy_address(apr_sockaddr_t **address,
+ apr_pool_t *pool)
+{
+ return apr_sockaddr_info_get(address,
+ "localhost", APR_UNSPEC, PROXY_PORT, 0,
+ pool);
+}
+
+/* Default implementation of a serf_connection_closed_t callback. */
+static void default_closed_connection(serf_connection_t *conn,
+ void *closed_baton,
+ apr_status_t why,
+ apr_pool_t *pool)
+{
+ if (why) {
+ abort();
+ }
+}
+
+/* Default implementation of a serf_connection_setup_t callback. */
+static apr_status_t default_conn_setup(apr_socket_t *skt,
+ serf_bucket_t **input_bkt,
+ serf_bucket_t **output_bkt,
+ void *setup_baton,
+ apr_pool_t *pool)
+{
+ test_baton_t *ctx = setup_baton;
+
+ *input_bkt = serf_bucket_socket_create(skt, ctx->bkt_alloc);
+ return APR_SUCCESS;
+}
+
+
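+/* Common client-side setup: create the serf context, bucket allocator and
+ * connection for the test baton, optionally configured to use the proxy. */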
+static apr_status_t setup(test_baton_t **tb_p,
+ serf_connection_setup_t conn_setup,
+ int use_proxy,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ test_baton_t *tb;
+ apr_uri_t url;
+
+ tb = apr_pcalloc(pool, sizeof(*tb));
+ *tb_p = tb;
+
+ tb->pool = pool;
+ tb->context = serf_context_create(pool);
+ tb->bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
+
+ status = default_server_address(&tb->serv_addr, pool);
+ if (status != APR_SUCCESS)
+ return status;
+
+ if (use_proxy) {
+ status = default_proxy_address(&tb->proxy_addr, pool);
+ if (status != APR_SUCCESS)
+ return status;
+
+ /* Configure serf to use the proxy server */
+ serf_config_proxy(tb->context, tb->proxy_addr);
+ }
+
+ status = apr_uri_parse(pool, SERV_URL, &url);
+ if (status != APR_SUCCESS)
+ return status;
+
+ status = serf_connection_create2(&tb->connection, tb->context,
+ url,
+ conn_setup ? conn_setup :
+ default_conn_setup,
+ tb,
+ default_closed_connection,
+ tb,
+ pool);
+
+ return status;
+}
+
+
+
+apr_status_t test_server_setup(test_baton_t **tb_p,
+ test_server_message_t *message_list,
+ apr_size_t message_count,
+ test_server_action_t *action_list,
+ apr_size_t action_count,
+ apr_int32_t options,
+ serf_connection_setup_t conn_setup,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ test_baton_t *tb;
+
+ status = setup(tb_p,
+ conn_setup,
+ FALSE,
+ pool);
+ if (status != APR_SUCCESS)
+ return status;
+
+ tb = *tb_p;
+
+ /* Prepare a server. */
+ status = test_start_server(&tb->serv_ctx, tb->serv_addr,
+ message_list, message_count,
+ action_list, action_count, options, pool);
+
+ return status;
+}
+
+apr_status_t
+test_server_proxy_setup(test_baton_t **tb_p,
+ test_server_message_t *serv_message_list,
+ apr_size_t serv_message_count,
+ test_server_action_t *serv_action_list,
+ apr_size_t serv_action_count,
+ test_server_message_t *proxy_message_list,
+ apr_size_t proxy_message_count,
+ test_server_action_t *proxy_action_list,
+ apr_size_t proxy_action_count,
+ apr_int32_t options,
+ serf_connection_setup_t conn_setup,
+ apr_pool_t *pool)
+{
+ apr_status_t status;
+ test_baton_t *tb;
+
+ status = setup(tb_p,
+ conn_setup,
+ TRUE,
+ pool);
+ if (status != APR_SUCCESS)
+ return status;
+
+ tb = *tb_p;
+
+ /* Prepare the server. */
+ status = test_start_server(&tb->serv_ctx, tb->serv_addr,
+ serv_message_list, serv_message_count,
+ serv_action_list, serv_action_count,
+ options, pool);
+ if (status != APR_SUCCESS)
+ return status;
+
+ /* Prepare the proxy. */
+ status = test_start_server(&tb->proxy_ctx, tb->proxy_addr,
+ proxy_message_list, proxy_message_count,
+ proxy_action_list, proxy_action_count,
+ options, pool);
+
+ return status;
+}
+
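+/* Close the client connection and stop the test server and, if started, the
+ * proxy. */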
+apr_status_t test_server_teardown(test_baton_t *tb, apr_pool_t *pool)
+{
+ serf_connection_close(tb->connection);
+
+ if (tb->serv_ctx)
+ test_server_destroy(tb->serv_ctx, pool);
+ if (tb->proxy_ctx)
+ test_server_destroy(tb->proxy_ctx, pool);
+
+ return APR_SUCCESS;
+}
+
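+/* Create the root pool for a single test. */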
+apr_pool_t *test_setup(void)
+{
+ apr_pool_t *test_pool;
+ apr_pool_create(&test_pool, NULL);
+ return test_pool;
+}
+
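+/* Destroy the test's root pool and everything allocated from it. */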
+void test_teardown(apr_pool_t *test_pool)
+{
+ apr_pool_destroy(test_pool);
+}
diff --git a/test/testcases/chunked-empty.response b/test/testcases/chunked-empty.response
new file mode 100644
index 0000000..8794459
--- /dev/null
+++ b/test/testcases/chunked-empty.response
@@ -0,0 +1,11 @@
+HTTP/1.1 200 OK
+Date: Sat, 04 Sep 2004 07:57:30 GMT
+Server: Apache/2.0.49-dev
+Last-Modified: Fri, 20 Sep 2002 01:33:12 GMT
+ETag: "19f5cb-240-48f26600"
+Accept-Ranges: bytes
+Transfer-Encoding: chunked
+Content-Type: text/html; charset=ISO-8859-1
+
+0
+
diff --git a/test/testcases/chunked-trailers.response b/test/testcases/chunked-trailers.response
new file mode 100644
index 0000000..8d66687
--- /dev/null
+++ b/test/testcases/chunked-trailers.response
@@ -0,0 +1,14 @@
+HTTP/1.1 200 OK
+Transfer-Encoding: chunked
+
+1d
+this is 1 test.
+i am a test.
+f
+this is a test.
+2
+
+
+0
+Trailer-Test: f
+
diff --git a/test/testcases/chunked.response b/test/testcases/chunked.response
new file mode 100644
index 0000000..16c9d33
--- /dev/null
+++ b/test/testcases/chunked.response
@@ -0,0 +1,13 @@
+HTTP/1.1 200 OK
+Transfer-Encoding: chunked
+
+1d
+this is 1 test.
+i am a test.
+f
+this is a test.
+2
+
+
+0
+
diff --git a/test/testcases/deflate.response b/test/testcases/deflate.response
new file mode 100644
index 0000000..0c277bf
--- /dev/null
+++ b/test/testcases/deflate.response
Binary files differ
diff --git a/test/testcases/simple.request b/test/testcases/simple.request
new file mode 100644
index 0000000..faab00e
--- /dev/null
+++ b/test/testcases/simple.request
@@ -0,0 +1 @@
+this is a test.
diff --git a/test/testcases/simple.response b/test/testcases/simple.response
new file mode 100644
index 0000000..aa66cc3
--- /dev/null
+++ b/test/testcases/simple.response
@@ -0,0 +1,32 @@
+HTTP/1.1 200 OK
+Date: Sat, 04 Sep 2004 07:57:30 GMT
+Server: Apache/2.0.49-dev
+Last-Modified: Fri, 20 Sep 2002 01:33:12 GMT
+ETag: "19f5cb-240-48f26600"
+Accept-Ranges: bytes
+Content-Length: 599
+Content-Type: text/html; charset=ISO-8859-1
+
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
+<head>
+<title>scotch.ics.uci.edu</title>
+<!--base href="http://scotch.ics.uci.edu/" /-->
+<link href="default.css" rel="stylesheet" type="text/css" />
+</head>
+
+<body>
+
+<p>More to come!</p>
+
+<p><a href="manual/">Apache httpd 2.0 manual</a></p>
+
+<p><a href="CA.cert.pem">Trust our CA!</a></p>
+
+<p><img src="apache_pb.gif" alt="Powered by Apache!" /></p>
+
+</body>
+
+</html>