author     Bert JW Regeer <xistence@0x58.com>   2019-12-20 11:30:59 +0100
committer  GitHub <noreply@github.com>          2019-12-20 11:30:59 +0100
commit     f11093a6b3240fc26830b6111e826128af7771c3 (patch)
tree       cedb1ee4aa0eed11b75f203034ab637164998acd
parent     1809765a65076844b67a122b4a573bcba36e2dcd (diff)
parent     e586be8f43dbb266b9ef28bb285408b3ac3dacd4 (diff)
download   waitress-f11093a6b3240fc26830b6111e826128af7771c3.tar.gz
Merge pull request from GHSA-g2xc-35jw-c63p (tag: v1.4.0)
HTTP Request Smuggling Fixes
-rw-r--r--  CHANGES.txt                        145
-rw-r--r--  HISTORY.txt                         79
-rw-r--r--  setup.py                             2
-rw-r--r--  waitress/parser.py                 132
-rw-r--r--  waitress/receiver.py                56
-rw-r--r--  waitress/task.py                    12
-rw-r--r--  waitress/tests/test_channel.py      30
-rw-r--r--  waitress/tests/test_functional.py  245
-rw-r--r--  waitress/tests/test_parser.py      349
-rw-r--r--  waitress/tests/test_receiver.py     85
-rw-r--r--  waitress/tests/test_task.py         36
-rw-r--r--  waitress/tests/test_utilities.py     4
-rw-r--r--  waitress/utilities.py               29
13 files changed, 777 insertions, 427 deletions
diff --git a/CHANGES.txt b/CHANGES.txt
index f511dbb..acc8510 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,78 +1,77 @@
-1.3.1 (2019-08-27)
+1.4.0 (2019-12-20)
------------------
Bugfixes
~~~~~~~~
-- Waitress won't accidentally throw away part of the path if it starts with a
- double slash (``GET //testing/whatever HTTP/1.0``). WSGI applications will
- now receive a ``PATH_INFO`` in the environment that contains
- ``//testing/whatever`` as required. See
- https://github.com/Pylons/waitress/issues/260 and
- https://github.com/Pylons/waitress/pull/261
-
-
-1.3.0 (2019-04-22)
-------------------
-
-Deprecations
-~~~~~~~~~~~~
-
-- The ``send_bytes`` adjustment now defaults to ``1`` and is deprecated
- pending removal in a future release.
- and https://github.com/Pylons/waitress/pull/246
-
-Features
-~~~~~~~~
-
-- Add a new ``outbuf_high_watermark`` adjustment which is used to apply
- backpressure on the ``app_iter`` to avoid letting it spin faster than data
- can be written to the socket. This stabilizes responses that iterate quickly
- with a lot of data.
- See https://github.com/Pylons/waitress/pull/242
-
-- Stop early and close the ``app_iter`` when attempting to write to a closed
- socket due to a client disconnect. This should notify a long-lived streaming
- response when a client hangs up.
- See https://github.com/Pylons/waitress/pull/238
- and https://github.com/Pylons/waitress/pull/240
- and https://github.com/Pylons/waitress/pull/241
-
-- Adjust the flush to output ``SO_SNDBUF`` bytes instead of whatever was
- set in the ``send_bytes`` adjustment. ``send_bytes`` now only controls how
- much waitress will buffer internally before flushing to the kernel, whereas
- previously it used to also throttle how much data was sent to the kernel.
- This change enables a streaming ``app_iter`` containing small chunks to
- still be flushed efficiently.
- See https://github.com/Pylons/waitress/pull/246
-
-Bugfixes
-~~~~~~~~
-
-- Upon receiving a request that does not include HTTP/1.0 or HTTP/1.1 we will
- no longer set the version to the string value "None". See
- https://github.com/Pylons/waitress/pull/252 and
- https://github.com/Pylons/waitress/issues/110
-
-- When a client closes a socket unexpectedly there was potential for memory
- leaks in which data was written to the buffers after they were closed,
- causing them to reopen.
- See https://github.com/Pylons/waitress/pull/239
-
-- Fix the queue depth warnings to only show when all threads are busy.
- See https://github.com/Pylons/waitress/pull/243
- and https://github.com/Pylons/waitress/pull/247
-
-- Trigger the ``app_iter`` to close as part of shutdown. This will only be
- noticeable for users of the internal server api. In more typical operations
- the server will die before benefiting from these changes.
- See https://github.com/Pylons/waitress/pull/245
-
-- Fix a bug in which a streaming ``app_iter`` may never cleanup data that has
- already been sent. This would cause buffers in waitress to grow without
- bounds. These buffers now properly rotate and release their data.
- See https://github.com/Pylons/waitress/pull/242
-
-- Fix a bug in which non-seekable subclasses of ``io.IOBase`` would trigger
- an exception when passed to the ``wsgi.file_wrapper`` callback.
- See https://github.com/Pylons/waitress/pull/249
+- Waitress used to slam the door shut on HTTP pipelined requests without
+ setting the ``Connection: close`` header as appropriate in the response. This
+ is of course not very friendly. Waitress now explicitly sets the header when
+ responding with an internally generated error such as 400 Bad Request or 500
+ Internal Server Error to notify the remote client that it will be closing the
+ connection after the response is sent (a client-side sketch follows this
+ list of bugfixes).
+
+- Waitress no longer allows any whitespace between a header field-name and
+ the colon. While Waitress did not strip the space, and was therefore not
+ vulnerable to any potential header field-name confusion, it should have been
+ rejecting such requests with a 400 Bad Request, and now does. See
+ https://github.com/Pylons/waitress/issues/273
+
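The following is a minimal client-side sketch (not part of this commit) of the
behaviour described in the first bugfix above. It assumes a waitress 1.4.0
instance is already serving some WSGI application on 127.0.0.1:8080, an
address chosen purely for illustration:

    import socket

    # Hypothetical address of a running waitress >= 1.4.0 instance.
    HOST, PORT = "127.0.0.1", 8080

    # Whitespace before the colon is rejected by the parser, so waitress
    # answers with an internally generated 400 instead of calling the app.
    request = b"GET / HTTP/1.1\r\nHost : localhost\r\n\r\n"

    with socket.create_connection((HOST, PORT)) as sock:
        sock.sendall(request)
        response = sock.recv(65536)

    # Expected: an "HTTP/1.1 400 Bad Request" status line plus a
    # "Connection: close" header telling the client not to pipeline any more
    # requests on this connection.
    print(response.decode("latin-1").split("\r\n\r\n", 1)[0])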
+Security Fixes
+~~~~~~~~~~~~~~
+
+- Waitress implemented a "MAY" part of the RFC7230
+ (https://tools.ietf.org/html/rfc7230#section-3.5) which states:
+
+ Although the line terminator for the start-line and header fields is
+ the sequence CRLF, a recipient MAY recognize a single LF as a line
+ terminator and ignore any preceding CR.
+
+ Unfortunately, if a front-end server does not parse header fields terminated
+ by a bare LF the same way it parses those terminated by CRLF, the front-end
+ and the back-end server can parse the same HTTP message in two different
+ ways. That opens the door to HTTP request smuggling/splitting, whereby
+ Waitress may see two requests while the front-end server sees only a single
+ HTTP message. Waitress now recognizes only CRLF as a line terminator and
+ responds with a 400 Bad Request when a bare CR or LF is found in the request
+ line or header fields.
+
+ For more information, I can highly recommend the blog post by ZeddYu Lu:
+ https://blog.zeddyu.info/2019/12/08/HTTP-Smuggling-en/
+
+- Waitress used to treat a bare LF the same as CRLF in ``Transfer-Encoding:
+ chunked`` request bodies. While the maintainer doesn't believe this could
+ lead to a security issue, it is no longer supported: all chunks are now
+ validated to be properly framed with CRLF as required by RFC7230.
+
+- Waitress now validates that the ``Transfer-Encoding`` header contains only
+ transfer codings that it is able to decode. At the moment the only supported
+ transfer coding is ``chunked``.
+
+ That means that if the following header is sent:
+
+ ``Transfer-Encoding: gzip, chunked``
+
+ Waitress will send back a 501 Not Implemented with an error message stating
+ as much: while Waitress supports ``chunked`` it does not support ``gzip``,
+ and it is unable to pass the still-encoded body on to the underlying WSGI
+ environment correctly (see the sketch following this changelog).
+
+ Waitress DOES NOT implement support for ``Transfer-Encoding: identity``;
+ even though ``identity`` was valid in RFC2616, it was removed in RFC7230.
+ Please update your clients to drop the ``Transfer-Encoding`` header if the
+ only transfer coding is ``identity``, or to use ``Transfer-Encoding:
+ chunked`` instead of ``Transfer-Encoding: identity, chunked``.
+
+- While validating the ``Transfer-Encoding`` header, Waitress now properly
+ handles line-folded ``Transfer-Encoding`` headers and those that contain
+ multiple comma-separated values. This closes a potential issue where a
+ front-end server could treat the request as chunked (and thus ignore the
+ Content-Length) while Waitress fell back to the Content-Length, because it
+ looked only for the single value ``chunked`` and did not support
+ comma-separated values.
+
+- Waitress used to explicitly set the Content-Length header to 0 if it was
+ unable to parse it as an integer (for example if the Content-Length header
+ was sent twice and thus folded together, or was otherwise invalid), thereby
+ allowing a request to be split and treated as two requests by Waitress's
+ HTTP pipelining support. If Waitress is unable to parse the Content-Length
+ header, it now sends a 400 Bad Request back to the client.
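To make the security fixes above concrete, here is a small self-contained
sketch (again not part of the commit) that starts waitress in a background
thread and sends two crafted requests over a raw socket: one with an
unsupported ``Transfer-Encoding`` and one with an unparseable
``Content-Length``. Before this commit the second request's Content-Length
would have been coerced to 0; with 1.4.0 the expected answers are 501 Not
Implemented and 400 Bad Request. The port number and the trivial app are
assumptions made purely for the example:

    import socket
    import threading
    import time

    from waitress import serve

    def app(environ, start_response):
        # Trivial WSGI app; waitress never reaches it for the requests below.
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello"]

    # Assumption: port 8080 is free on this machine.
    threading.Thread(
        target=serve,
        args=(app,),
        kwargs={"host": "127.0.0.1", "port": 8080},
        daemon=True,
    ).start()
    time.sleep(1)  # crude wait for the listening socket to come up

    def status_line(raw):
        with socket.create_connection(("127.0.0.1", 8080)) as sock:
            sock.sendall(raw)
            return sock.recv(65536).split(b"\r\n", 1)[0]

    # Unsupported transfer coding: waitress can only decode "chunked".
    print(status_line(
        b"POST / HTTP/1.1\r\nHost: localhost\r\n"
        b"Transfer-Encoding: gzip, chunked\r\n\r\n"
        b"0\r\n\r\n"
    ))  # expected: b'HTTP/1.1 501 Not Implemented'

    # Unparseable Content-Length: previously coerced to 0, now rejected.
    print(status_line(
        b"GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 4, 4\r\n\r\n"
    ))  # expected: b'HTTP/1.1 400 Bad Request'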
diff --git a/HISTORY.txt b/HISTORY.txt
index ab361b6..b7ef0aa 100644
--- a/HISTORY.txt
+++ b/HISTORY.txt
@@ -1,3 +1,82 @@
+1.3.1 (2019-08-27)
+------------------
+
+Bugfixes
+~~~~~~~~
+
+- Waitress won't accidentally throw away part of the path if it starts with a
+ double slash (``GET //testing/whatever HTTP/1.0``). WSGI applications will
+ now receive a ``PATH_INFO`` in the environment that contains
+ ``//testing/whatever`` as required. See
+ https://github.com/Pylons/waitress/issues/260 and
+ https://github.com/Pylons/waitress/pull/261
+
+
+1.3.0 (2019-04-22)
+------------------
+
+Deprecations
+~~~~~~~~~~~~
+
+- The ``send_bytes`` adjustment now defaults to ``1`` and is deprecated
+ pending removal in a future release.
+ and https://github.com/Pylons/waitress/pull/246
+
+Features
+~~~~~~~~
+
+- Add a new ``outbuf_high_watermark`` adjustment which is used to apply
+ backpressure on the ``app_iter`` to avoid letting it spin faster than data
+ can be written to the socket. This stabilizes responses that iterate quickly
+ with a lot of data.
+ See https://github.com/Pylons/waitress/pull/242
+
+- Stop early and close the ``app_iter`` when attempting to write to a closed
+ socket due to a client disconnect. This should notify a long-lived streaming
+ response when a client hangs up.
+ See https://github.com/Pylons/waitress/pull/238
+ and https://github.com/Pylons/waitress/pull/240
+ and https://github.com/Pylons/waitress/pull/241
+
+- Adjust the flush to output ``SO_SNDBUF`` bytes instead of whatever was
+ set in the ``send_bytes`` adjustment. ``send_bytes`` now only controls how
+ much waitress will buffer internally before flushing to the kernel, whereas
+ previously it used to also throttle how much data was sent to the kernel.
+ This change enables a streaming ``app_iter`` containing small chunks to
+ still be flushed efficiently.
+ See https://github.com/Pylons/waitress/pull/246
+
+Bugfixes
+~~~~~~~~
+
+- Upon receiving a request that does not include HTTP/1.0 or HTTP/1.1 we will
+ no longer set the version to the string value "None". See
+ https://github.com/Pylons/waitress/pull/252 and
+ https://github.com/Pylons/waitress/issues/110
+
+- When a client closes a socket unexpectedly there was potential for memory
+ leaks in which data was written to the buffers after they were closed,
+ causing them to reopen.
+ See https://github.com/Pylons/waitress/pull/239
+
+- Fix the queue depth warnings to only show when all threads are busy.
+ See https://github.com/Pylons/waitress/pull/243
+ and https://github.com/Pylons/waitress/pull/247
+
+- Trigger the ``app_iter`` to close as part of shutdown. This will only be
+ noticeable for users of the internal server api. In more typical operations
+ the server will die before benefiting from these changes.
+ See https://github.com/Pylons/waitress/pull/245
+
+- Fix a bug in which a streaming ``app_iter`` may never cleanup data that has
+ already been sent. This would cause buffers in waitress to grow without
+ bounds. These buffers now properly rotate and release their data.
+ See https://github.com/Pylons/waitress/pull/242
+
+- Fix a bug in which non-seekable subclasses of ``io.IOBase`` would trigger
+ an exception when passed to the ``wsgi.file_wrapper`` callback.
+ See https://github.com/Pylons/waitress/pull/249
+
1.2.1 (2019-01-25)
------------------
diff --git a/setup.py b/setup.py
index c2f286b..96846d1 100644
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,7 @@ testing_extras = [
setup(
name="waitress",
- version="1.3.1",
+ version="1.4.0",
author="Zope Foundation and Contributors",
author_email="zope-dev@zope.org",
maintainer="Pylons Project",
diff --git a/waitress/parser.py b/waitress/parser.py
index 815a389..dd591f2 100644
--- a/waitress/parser.py
+++ b/waitress/parser.py
@@ -19,24 +19,15 @@ processing but threads to do work.
import re
from io import BytesIO
-from waitress.compat import (
- tostr,
- urlparse,
- unquote_bytes_to_wsgi,
-)
-
from waitress.buffers import OverflowableBuffer
-
-from waitress.receiver import (
- FixedStreamReceiver,
- ChunkedReceiver,
-)
-
+from waitress.compat import tostr, unquote_bytes_to_wsgi, urlparse
+from waitress.receiver import ChunkedReceiver, FixedStreamReceiver
from waitress.utilities import (
- find_double_newline,
+ BadRequest,
RequestEntityTooLarge,
RequestHeaderFieldsTooLarge,
- BadRequest,
+ ServerNotImplemented,
+ find_double_newline,
)
@@ -44,6 +35,10 @@ class ParsingError(Exception):
pass
+class TransferEncodingNotImplemented(Exception):
+ pass
+
+
class HTTPRequestParser(object):
"""A structure that collects the HTTP request.
@@ -85,18 +80,48 @@ class HTTPRequestParser(object):
"""
if self.completed:
return 0 # Can't consume any more.
+
datalen = len(data)
br = self.body_rcv
if br is None:
# In header.
+ max_header = self.adj.max_request_header_size
+
s = self.header_plus + data
index = find_double_newline(s)
+ consumed = 0
+
+ if index >= 0:
+ # If the headers have ended, and we also have part of the body
+ # message in data we still want to validate we aren't going
+ # over our limit for received headers.
+ self.header_bytes_received += index
+ consumed = datalen - (len(s) - index)
+ else:
+ self.header_bytes_received += datalen
+ consumed = datalen
+
+ # If the first line + headers is over the max length, we return a
+ # RequestHeaderFieldsTooLarge error rather than continuing to
+ # attempt to parse the headers.
+ if self.header_bytes_received >= max_header:
+ self.parse_header(b"GET / HTTP/1.0\r\n")
+ self.error = RequestHeaderFieldsTooLarge(
+ "exceeds max_header of %s" % max_header
+ )
+ self.completed = True
+ return consumed
+
if index >= 0:
# Header finished.
header_plus = s[:index]
- consumed = len(data) - (len(s) - index)
- # Remove preceeding blank lines.
+
+ # Remove preceding blank lines. This is suggested by
+ # https://tools.ietf.org/html/rfc7230#section-3.5 to support
+ # clients sending an extra CR LF after another request when
+ # using HTTP pipelining
header_plus = header_plus.lstrip()
+
if not header_plus:
self.empty = True
self.completed = True
@@ -106,43 +131,39 @@ class HTTPRequestParser(object):
except ParsingError as e:
self.error = BadRequest(e.args[0])
self.completed = True
+ except TransferEncodingNotImplemented as e:
+ self.error = ServerNotImplemented(e.args[0])
+ self.completed = True
else:
if self.body_rcv is None:
# no content-length header and not a t-e: chunked
# request
self.completed = True
+
if self.content_length > 0:
max_body = self.adj.max_request_body_size
# we won't accept this request if the content-length
# is too large
+
if self.content_length >= max_body:
self.error = RequestEntityTooLarge(
"exceeds max_body of %s" % max_body
)
self.completed = True
self.headers_finished = True
+
return consumed
- else:
- # Header not finished yet.
- self.header_bytes_received += datalen
- max_header = self.adj.max_request_header_size
- if self.header_bytes_received >= max_header:
- # malformed header, we need to construct some request
- # on our own. we disregard the incoming(?) requests HTTP
- # version and just use 1.0. IOW someone just sent garbage
- # over the wire
- self.parse_header(b"GET / HTTP/1.0\n")
- self.error = RequestHeaderFieldsTooLarge(
- "exceeds max_header of %s" % max_header
- )
- self.completed = True
- self.header_plus = s
- return datalen
+
+ # Header not finished yet.
+ self.header_plus = s
+
+ return datalen
else:
# In body.
consumed = br.received(data)
self.body_bytes_received += consumed
max_body = self.adj.max_request_body_size
+
if self.body_bytes_received >= max_body:
# this will only be raised during t-e: chunked requests
self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body)
@@ -154,6 +175,7 @@ class HTTPRequestParser(object):
elif br.completed:
# The request (with the body) is ready to use.
self.completed = True
+
if self.chunked:
# We've converted the chunked transfer encoding request
# body into a normal request body, so we know its content
@@ -162,6 +184,7 @@ class HTTPRequestParser(object):
# appear to the client to be an entirely non-chunked HTTP
# request with a valid content-length.
self.headers["CONTENT_LENGTH"] = str(br.__len__())
+
return consumed
def parse_header(self, header_plus):
@@ -169,13 +192,15 @@ class HTTPRequestParser(object):
Parses the header_plus block of text (the headers plus the
first line of the request).
"""
- index = header_plus.find(b"\n")
+ index = header_plus.find(b"\r\n")
if index >= 0:
first_line = header_plus[:index].rstrip()
- header = header_plus[index + 1 :]
+ header = header_plus[index + 2 :]
else:
- first_line = header_plus.rstrip()
- header = b""
+ raise ParsingError("HTTP message header invalid")
+
+ if b"\r" in first_line or b"\n" in first_line:
+ raise ParsingError("Bare CR or LF found in HTTP message")
self.first_line = first_line # for testing
@@ -186,6 +211,10 @@ class HTTPRequestParser(object):
index = line.find(b":")
if index > 0:
key = line[:index]
+
+ if key != key.strip():
+ raise ParsingError("Invalid whitespace after field-name")
+
if b"_" in key:
continue
value = line[index + 1 :].strip()
@@ -226,10 +255,31 @@ class HTTPRequestParser(object):
# should not see the HTTP_TRANSFER_ENCODING header; we pop it
# here
te = headers.pop("TRANSFER_ENCODING", "")
- if te.lower() == "chunked":
+
+ encodings = [encoding.strip().lower() for encoding in te.split(",") if encoding]
+
+ for encoding in encodings:
+ # Out of the transfer-codings listed in
+ # https://tools.ietf.org/html/rfc7230#section-4 we only support
+ # chunked at this time.
+
+ # Note: the identity transfer-coding was removed in RFC7230:
+ # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus
+ # not supported
+ if encoding not in {"chunked"}:
+ raise TransferEncodingNotImplemented(
+ "Transfer-Encoding requested is not supported."
+ )
+
+ if encodings and encodings[-1] == "chunked":
self.chunked = True
buf = OverflowableBuffer(self.adj.inbuf_overflow)
self.body_rcv = ChunkedReceiver(buf)
+ elif encodings: # pragma: nocover
+ raise TransferEncodingNotImplemented(
+ "Transfer-Encoding requested is not supported."
+ )
+
expect = headers.get("EXPECT", "").lower()
self.expect_continue = expect == "100-continue"
if connection.lower() == "close":
@@ -239,7 +289,8 @@ class HTTPRequestParser(object):
try:
cl = int(headers.get("CONTENT_LENGTH", 0))
except ValueError:
- cl = 0
+ raise ParsingError("Content-Length is invalid")
+
self.content_length = cl
if cl > 0:
buf = OverflowableBuffer(self.adj.inbuf_overflow)
@@ -299,8 +350,11 @@ def get_header_lines(header):
Splits the header into lines, putting multi-line headers together.
"""
r = []
- lines = header.split(b"\n")
+ lines = header.split(b"\r\n")
for line in lines:
+ if b"\r" in line or b"\n" in line:
+ raise ParsingError('Bare CR or LF found in header line "%s"' % tostr(line))
+
if line.startswith((b" ", b"\t")):
if not r:
# https://corte.si/posts/code/pathod/pythonservers/index.html
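Seen from the unit level (mirroring the parser tests further down in this
commit), the stricter parsing looks roughly like the following sketch, which
assumes waitress 1.4.0's ``HTTPRequestParser`` and ``Adjustments`` classes:

    from waitress.adjustments import Adjustments
    from waitress.parser import HTTPRequestParser

    # A CRLF-terminated request parses cleanly.
    parser = HTTPRequestParser(Adjustments())
    parser.received(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
    print(parser.completed, parser.error)  # True None

    # A bare-LF request is no longer recognized as a complete header block,
    # and once a real CRLF CRLF arrives the embedded LFs are rejected, so the
    # parser completes with a BadRequest error (surfaced to the client as 400).
    parser = HTTPRequestParser(Adjustments())
    parser.received(b"GET / HTTP/1.1\nHost: localhost\n\n")
    parser.received(b"\r\n\r\n")
    print(parser.completed, parser.error)  # True and a BadRequest instance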
diff --git a/waitress/receiver.py b/waitress/receiver.py
index 380b8fd..5d1568d 100644
--- a/waitress/receiver.py
+++ b/waitress/receiver.py
@@ -14,9 +14,7 @@
"""Data Chunk Receiver
"""
-from waitress.utilities import find_double_newline
-
-from waitress.utilities import BadRequest
+from waitress.utilities import BadRequest, find_double_newline
class FixedStreamReceiver(object):
@@ -35,18 +33,23 @@ class FixedStreamReceiver(object):
def received(self, data):
"See IStreamConsumer"
rm = self.remain
+
if rm < 1:
self.completed = True # Avoid any chance of spinning
+
return 0
datalen = len(data)
+
if rm <= datalen:
self.buf.append(data[:rm])
self.remain = 0
self.completed = True
+
return rm
else:
self.buf.append(data)
self.remain -= datalen
+
return datalen
def getfile(self):
@@ -59,7 +62,9 @@ class FixedStreamReceiver(object):
class ChunkedReceiver(object):
chunk_remainder = 0
+ validate_chunk_end = False
control_line = b""
+ chunk_end = b""
all_chunks_received = False
trailer = b""
completed = False
@@ -76,35 +81,64 @@ class ChunkedReceiver(object):
def received(self, s):
# Returns the number of bytes consumed.
+
if self.completed:
return 0
orig_size = len(s)
+
while s:
rm = self.chunk_remainder
+
if rm > 0:
# Receive the remainder of a chunk.
to_write = s[:rm]
self.buf.append(to_write)
written = len(to_write)
s = s[written:]
+
self.chunk_remainder -= written
+
+ if self.chunk_remainder == 0:
+ self.validate_chunk_end = True
+ elif self.validate_chunk_end:
+ s = self.chunk_end + s
+
+ pos = s.find(b"\r\n")
+
+ if pos < 0 and len(s) < 2:
+ self.chunk_end = s
+ s = b""
+ else:
+ self.chunk_end = b""
+ if pos == 0:
+ # Chop off the terminating CR LF from the chunk
+ s = s[2:]
+ else:
+ self.error = BadRequest("Chunk not properly terminated")
+ self.all_chunks_received = True
+
+ # Always exit this loop
+ self.validate_chunk_end = False
elif not self.all_chunks_received:
# Receive a control line.
s = self.control_line + s
- pos = s.find(b"\n")
+ pos = s.find(b"\r\n")
+
if pos < 0:
# Control line not finished.
self.control_line = s
- s = ""
+ s = b""
else:
# Control line finished.
line = s[:pos]
- s = s[pos + 1 :]
+ s = s[pos + 2 :]
self.control_line = b""
line = line.strip()
+
if line:
# Begin a new chunk.
semi = line.find(b";")
+
if semi >= 0:
# discard extension info.
line = line[:semi]
@@ -113,6 +147,7 @@ class ChunkedReceiver(object):
except ValueError: # garbage in input
self.error = BadRequest("garbage in chunked encoding input")
sz = 0
+
if sz > 0:
# Start a new chunk.
self.chunk_remainder = sz
@@ -123,15 +158,14 @@ class ChunkedReceiver(object):
else:
# Receive the trailer.
trailer = self.trailer + s
+
if trailer.startswith(b"\r\n"):
# No trailer.
self.completed = True
+
return orig_size - (len(trailer) - 2)
- elif trailer.startswith(b"\n"):
- # No trailer.
- self.completed = True
- return orig_size - (len(trailer) - 1)
pos = find_double_newline(trailer)
+
if pos < 0:
# Trailer not finished.
self.trailer = trailer
@@ -140,7 +174,9 @@ class ChunkedReceiver(object):
# Finished the trailer.
self.completed = True
self.trailer = trailer[:pos]
+
return orig_size - (len(trailer) - pos)
+
return orig_size
def getfile(self):
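For reference, a minimal sketch (not part of the commit) of a chunked body
that the stricter ``ChunkedReceiver`` accepts: every chunk now has to carry
its own terminating CRLF before the next size line, and a bare LF is treated
as a framing error:

    from waitress.buffers import OverflowableBuffer
    from waitress.receiver import ChunkedReceiver

    payload = b"This string has 32 characters.\r\n"  # 0x20 bytes of chunk data

    body = (
        b"20\r\n"     # chunk-size line (hexadecimal)
        + payload     # chunk data
        + b"\r\n"     # the chunk's own CRLF terminator, now mandatory
        + b"0\r\n"    # last-chunk marker
        + b"\r\n"     # end of the (empty) trailer section
    )

    # 1048576 is just an assumed overflow threshold; parser.py passes
    # adj.inbuf_overflow here.
    rcv = ChunkedReceiver(OverflowableBuffer(1048576))
    rcv.received(body)
    print(rcv.completed, rcv.error)  # True None -- the body is properly framed
    # Replacing the chunk's CRLF with a bare b"\n" now yields a BadRequest
    # ("Chunk not properly terminated") instead of being silently accepted.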
diff --git a/waitress/task.py b/waitress/task.py
index c12b0f9..8e7ab18 100644
--- a/waitress/task.py
+++ b/waitress/task.py
@@ -353,14 +353,10 @@ class ErrorTask(Task):
status, headers, body = e.to_response()
self.status = status
self.response_headers.extend(headers)
- if self.version == "1.1":
- connection = self.request.headers.get("CONNECTION", "").lower()
- if connection == "close":
- self.response_headers.append(("Connection", "close"))
- # under HTTP 1.1 keep-alive is default, no need to set the header
- else:
- # HTTP 1.0
- self.response_headers.append(("Connection", "close"))
+ # We need to explicitly tell the remote client we are closing the
+ # connection, because self.close_on_finish is set, and we are going to
+ # slam the door in the client's face.
+ self.response_headers.append(("Connection", "close"))
self.close_on_finish = True
self.content_length = len(body)
self.write(tobytes(body))
diff --git a/waitress/tests/test_channel.py b/waitress/tests/test_channel.py
index 252fd1e..14ef5a0 100644
--- a/waitress/tests/test_channel.py
+++ b/waitress/tests/test_channel.py
@@ -423,7 +423,7 @@ class TestHTTPChannel(unittest.TestCase):
def test_received(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
- inst.received(b"GET / HTTP/1.1\n\n")
+ inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.server.tasks, [inst])
self.assertTrue(inst.requests)
@@ -438,7 +438,7 @@ class TestHTTPChannel(unittest.TestCase):
inst.request = preq
preq.completed = False
preq.empty = True
- inst.received(b"GET / HTTP/1.1\n\n")
+ inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.requests, ())
self.assertEqual(inst.server.tasks, [])
@@ -449,7 +449,7 @@ class TestHTTPChannel(unittest.TestCase):
inst.request = preq
preq.completed = True
preq.empty = True
- inst.received(b"GET / HTTP/1.1\n\n")
+ inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, None)
self.assertEqual(inst.server.tasks, [])
@@ -460,7 +460,7 @@ class TestHTTPChannel(unittest.TestCase):
inst.request = preq
preq.completed = True
preq.error = True
- inst.received(b"GET / HTTP/1.1\n\n")
+ inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, None)
self.assertEqual(len(inst.server.tasks), 1)
self.assertTrue(inst.requests)
@@ -473,24 +473,10 @@ class TestHTTPChannel(unittest.TestCase):
preq.completed = True
preq.empty = True
preq.connection_close = True
- inst.received(b"GET / HTTP/1.1\n\n" + b"a" * 50000)
+ inst.received(b"GET / HTTP/1.1\r\n\r\n" + b"a" * 50000)
self.assertEqual(inst.request, None)
self.assertEqual(inst.server.tasks, [])
- def test_received_preq_completed_n_lt_data(self):
- inst, sock, map = self._makeOneWithMap()
- inst.server = DummyServer()
- preq = DummyParser()
- inst.request = preq
- preq.completed = True
- preq.empty = False
- line = b"GET / HTTP/1.1\n\n"
- preq.retval = len(line)
- inst.received(line + line)
- self.assertEqual(inst.request, None)
- self.assertEqual(len(inst.requests), 2)
- self.assertEqual(len(inst.server.tasks), 1)
-
def test_received_headers_finished_expect_continue_false(self):
inst, sock, map = self._makeOneWithMap()
inst.server = DummyServer()
@@ -501,7 +487,7 @@ class TestHTTPChannel(unittest.TestCase):
preq.completed = False
preq.empty = False
preq.retval = 1
- inst.received(b"GET / HTTP/1.1\n\n")
+ inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, preq)
self.assertEqual(inst.server.tasks, [])
self.assertEqual(inst.outbufs[0].get(100), b"")
@@ -515,7 +501,7 @@ class TestHTTPChannel(unittest.TestCase):
preq.headers_finished = True
preq.completed = False
preq.empty = False
- inst.received(b"GET / HTTP/1.1\n\n")
+ inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, preq)
self.assertEqual(inst.server.tasks, [])
self.assertEqual(sock.sent, b"HTTP/1.1 100 Continue\r\n\r\n")
@@ -532,7 +518,7 @@ class TestHTTPChannel(unittest.TestCase):
preq.completed = False
preq.empty = False
inst.sent_continue = True
- inst.received(b"GET / HTTP/1.1\n\n")
+ inst.received(b"GET / HTTP/1.1\r\n\r\n")
self.assertEqual(inst.request, preq)
self.assertEqual(inst.server.tasks, [])
self.assertEqual(sock.sent, b"")
diff --git a/waitress/tests/test_functional.py b/waitress/tests/test_functional.py
index bfe5072..8f4b262 100644
--- a/waitress/tests/test_functional.py
+++ b/waitress/tests/test_functional.py
@@ -179,7 +179,7 @@ class EchoTests(object):
return line, headers, echo.parse_response(body)
def test_date_and_server(self):
- to_send = "GET / HTTP/1.0\n" "Content-Length: 0\n\n"
+ to_send = "GET / HTTP/1.0\r\nContent-Length: 0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -191,7 +191,7 @@ class EchoTests(object):
def test_bad_host_header(self):
# https://corte.si/posts/code/pathod/pythonservers/index.html
- to_send = "GET / HTTP/1.0\n" " Host: 0\n\n"
+ to_send = "GET / HTTP/1.0\r\n Host: 0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -202,7 +202,7 @@ class EchoTests(object):
self.assertTrue(headers.get("date"))
def test_send_with_body(self):
- to_send = "GET / HTTP/1.0\n" "Content-Length: 5\n\n"
+ to_send = "GET / HTTP/1.0\r\nContent-Length: 5\r\n\r\n"
to_send += "hello"
to_send = tobytes(to_send)
self.connect()
@@ -214,7 +214,7 @@ class EchoTests(object):
self.assertEqual(echo.body, b"hello")
def test_send_empty_body(self):
- to_send = "GET / HTTP/1.0\n" "Content-Length: 0\n\n"
+ to_send = "GET / HTTP/1.0\r\nContent-Length: 0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -241,12 +241,12 @@ class EchoTests(object):
self.sock = orig_sock
def test_without_crlf(self):
- data = "Echo\nthis\r\nplease"
+ data = "Echo\r\nthis\r\nplease"
s = tobytes(
- "GET / HTTP/1.0\n"
- "Connection: close\n"
- "Content-Length: %d\n"
- "\n"
+ "GET / HTTP/1.0\r\n"
+ "Connection: close\r\n"
+ "Content-Length: %d\r\n"
+ "\r\n"
"%s" % (len(data), data)
)
self.connect()
@@ -262,7 +262,7 @@ class EchoTests(object):
# 1024 characters.
body = "This string has 32 characters.\r\n" * 32
s = tobytes(
- "GET / HTTP/1.0\n" "Content-Length: %d\n" "\n" "%s" % (len(body), body)
+ "GET / HTTP/1.0\r\nContent-Length: %d\r\n\r\n%s" % (len(body), body)
)
self.connect()
self.sock.send(s)
@@ -289,7 +289,7 @@ class EchoTests(object):
h.close()
def test_chunking_request_without_content(self):
- header = tobytes("GET / HTTP/1.1\n" "Transfer-Encoding: chunked\n\n")
+ header = tobytes("GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n")
self.connect()
self.sock.send(header)
self.sock.send(b"0\r\n\r\n")
@@ -304,13 +304,14 @@ class EchoTests(object):
control_line = b"20;\r\n" # 20 hex = 32 dec
s = b"This string has 32 characters.\r\n"
expected = s * 12
- header = tobytes("GET / HTTP/1.1\n" "Transfer-Encoding: chunked\n\n")
+ header = tobytes("GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n")
self.connect()
self.sock.send(header)
fp = self.sock.makefile("rb", 0)
for n in range(12):
self.sock.send(control_line)
self.sock.send(s)
+ self.sock.send(b"\r\n") # End the chunk
self.sock.send(b"0\r\n\r\n")
line, headers, echo = self._read_echo(fp)
self.assertline(line, "200", "OK", "HTTP/1.1")
@@ -321,11 +322,34 @@ class EchoTests(object):
def test_broken_chunked_encoding(self):
control_line = "20;\r\n" # 20 hex = 32 dec
s = "This string has 32 characters.\r\n"
- to_send = "GET / HTTP/1.1\nTransfer-Encoding: chunked\n\n"
- to_send += control_line + s
+ to_send = "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
+ to_send += control_line + s + "\r\n"
# garbage in input
- to_send += "GET / HTTP/1.1\nTransfer-Encoding: chunked\n\n"
+ to_send += "garbage\r\n"
+ to_send = tobytes(to_send)
+ self.connect()
+ self.sock.send(to_send)
+ fp = self.sock.makefile("rb", 0)
+ line, headers, response_body = read_http(fp)
+ # receiver caught garbage and turned it into a 400
+ self.assertline(line, "400", "Bad Request", "HTTP/1.1")
+ cl = int(headers["content-length"])
+ self.assertEqual(cl, len(response_body))
+ self.assertEqual(
+ sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"]
+ )
+ self.assertEqual(headers["content-type"], "text/plain")
+ # connection has been closed
+ self.send_check_error(to_send)
+ self.assertRaises(ConnectionClosed, read_http, fp)
+
+ def test_broken_chunked_encoding_missing_chunk_end(self):
+ control_line = "20;\r\n" # 20 hex = 32 dec
+ s = "This string has 32 characters.\r\n"
+ to_send = "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
to_send += control_line + s
+ # garbage in input
+ to_send += "garbage"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -335,8 +359,9 @@ class EchoTests(object):
self.assertline(line, "400", "Bad Request", "HTTP/1.1")
cl = int(headers["content-length"])
self.assertEqual(cl, len(response_body))
+ self.assertTrue(b"Chunk not properly terminated" in response_body)
self.assertEqual(
- sorted(headers.keys()), ["content-length", "content-type", "date", "server"]
+ sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"]
)
self.assertEqual(headers["content-type"], "text/plain")
# connection has been closed
@@ -347,7 +372,7 @@ class EchoTests(object):
# Handling of Keep-Alive within HTTP 1.0
data = "Default: Don't keep me alive"
s = tobytes(
- "GET / HTTP/1.0\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data)
+ "GET / HTTP/1.0\r\nContent-Length: %d\r\n\r\n%s" % (len(data), data)
)
self.connect()
self.sock.send(s)
@@ -365,10 +390,10 @@ class EchoTests(object):
# the corresponding header
data = "Keep me alive"
s = tobytes(
- "GET / HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: %d\n"
- "\n"
+ "GET / HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: %d\r\n"
+ "\r\n"
"%s" % (len(data), data)
)
self.connect()
@@ -385,7 +410,7 @@ class EchoTests(object):
# All connections are kept alive, unless stated otherwise
data = "Default: Keep me alive"
s = tobytes(
- "GET / HTTP/1.1\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data)
+ "GET / HTTP/1.1\r\nContent-Length: %d\r\n\r\n%s" % (len(data), data)
)
self.connect()
self.sock.send(s)
@@ -398,10 +423,10 @@ class EchoTests(object):
# Explicitly set keep-alive
data = "Default: Keep me alive"
s = tobytes(
- "GET / HTTP/1.1\n"
- "Connection: keep-alive\n"
- "Content-Length: %d\n"
- "\n"
+ "GET / HTTP/1.1\r\n"
+ "Connection: keep-alive\r\n"
+ "Content-Length: %d\r\n"
+ "\r\n"
"%s" % (len(data), data)
)
self.connect()
@@ -415,10 +440,10 @@ class EchoTests(object):
# specifying Connection: close explicitly
data = "Don't keep me alive"
s = tobytes(
- "GET / HTTP/1.1\n"
- "Connection: close\n"
- "Content-Length: %d\n"
- "\n"
+ "GET / HTTP/1.1\r\n"
+ "Connection: close\r\n"
+ "Content-Length: %d\r\n"
+ "\r\n"
"%s" % (len(data), data)
)
self.connect()
@@ -430,12 +455,12 @@ class EchoTests(object):
def test_proxy_headers(self):
to_send = (
- "GET / HTTP/1.0\n"
- "Content-Length: 0\n"
- "Host: www.google.com:8080\n"
- "X-Forwarded-For: 192.168.1.1\n"
- "X-Forwarded-Proto: https\n"
- "X-Forwarded-Port: 5000\n\n"
+ "GET / HTTP/1.0\r\n"
+ "Content-Length: 0\r\n"
+ "Host: www.google.com:8080\r\n"
+ "X-Forwarded-For: 192.168.1.1\r\n"
+ "X-Forwarded-Proto: https\r\n"
+ "X-Forwarded-Port: 5000\r\n\r\n"
)
to_send = tobytes(to_send)
self.connect()
@@ -507,11 +532,11 @@ class ExpectContinueTests(object):
# specifying Connection: close explicitly
data = "I have expectations"
to_send = tobytes(
- "GET / HTTP/1.1\n"
- "Connection: close\n"
- "Content-Length: %d\n"
- "Expect: 100-continue\n"
- "\n"
+ "GET / HTTP/1.1\r\n"
+ "Connection: close\r\n"
+ "Content-Length: %d\r\n"
+ "Expect: 100-continue\r\n"
+ "\r\n"
"%s" % (len(data), data)
)
self.connect()
@@ -546,10 +571,10 @@ class BadContentLengthTests(object):
# check to see if server closes connection when body is too short
# for cl header
to_send = tobytes(
- "GET /short_body HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: 0\n"
- "\n"
+ "GET /short_body HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: 0\r\n"
+ "\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -572,10 +597,10 @@ class BadContentLengthTests(object):
# check server doesnt close connection when body is too short
# for cl header
to_send = tobytes(
- "GET /long_body HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: 0\n"
- "\n"
+ "GET /long_body HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: 0\r\n"
+ "\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -611,9 +636,9 @@ class NoContentLengthTests(object):
def test_http10_generator(self):
body = string.ascii_letters
to_send = (
- "GET / HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: %d\n\n" % len(body)
+ "GET / HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: %d\r\n\r\n" % len(body)
)
to_send += body
to_send = tobytes(to_send)
@@ -633,9 +658,9 @@ class NoContentLengthTests(object):
def test_http10_list(self):
body = string.ascii_letters
to_send = (
- "GET /list HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: %d\n\n" % len(body)
+ "GET /list HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: %d\r\n\r\n" % len(body)
)
to_send += body
to_send = tobytes(to_send)
@@ -656,9 +681,9 @@ class NoContentLengthTests(object):
def test_http10_listlentwo(self):
body = string.ascii_letters
to_send = (
- "GET /list_lentwo HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: %d\n\n" % len(body)
+ "GET /list_lentwo HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: %d\r\n\r\n" % len(body)
)
to_send += body
to_send = tobytes(to_send)
@@ -677,7 +702,7 @@ class NoContentLengthTests(object):
def test_http11_generator(self):
body = string.ascii_letters
- to_send = "GET / HTTP/1.1\n" "Content-Length: %s\n\n" % len(body)
+ to_send = "GET / HTTP/1.1\r\nContent-Length: %s\r\n\r\n" % len(body)
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -698,7 +723,7 @@ class NoContentLengthTests(object):
def test_http11_list(self):
body = string.ascii_letters
- to_send = "GET /list HTTP/1.1\n" "Content-Length: %d\n\n" % len(body)
+ to_send = "GET /list HTTP/1.1\r\nContent-Length: %d\r\n\r\n" % len(body)
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -716,7 +741,7 @@ class NoContentLengthTests(object):
def test_http11_listlentwo(self):
body = string.ascii_letters
- to_send = "GET /list_lentwo HTTP/1.1\n" "Content-Length: %s\n\n" % len(body)
+ to_send = "GET /list_lentwo HTTP/1.1\r\nContent-Length: %s\r\n\r\n" % len(body)
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -749,10 +774,10 @@ class WriteCallbackTests(object):
# check to see if server closes connection when body is too short
# for cl header
to_send = tobytes(
- "GET /short_body HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: 0\n"
- "\n"
+ "GET /short_body HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: 0\r\n"
+ "\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -773,10 +798,10 @@ class WriteCallbackTests(object):
# check server doesnt close connection when body is too long
# for cl header
to_send = tobytes(
- "GET /long_body HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: 0\n"
- "\n"
+ "GET /long_body HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: 0\r\n"
+ "\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -796,10 +821,10 @@ class WriteCallbackTests(object):
# check server doesnt close connection when body is equal to
# cl header
to_send = tobytes(
- "GET /equal_body HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: 0\n"
- "\n"
+ "GET /equal_body HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: 0\r\n"
+ "\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -819,10 +844,10 @@ class WriteCallbackTests(object):
def test_no_content_length(self):
# wtf happens when there's no content-length
to_send = tobytes(
- "GET /no_content_length HTTP/1.0\n"
- "Connection: Keep-Alive\n"
- "Content-Length: 0\n"
- "\n"
+ "GET /no_content_length HTTP/1.0\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: 0\r\n"
+ "\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -853,7 +878,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_wrong_cl_http10(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.0\n" "Content-Length: 5\n\n"
+ to_send = "GET / HTTP/1.0\r\nContent-Length: 5\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -872,7 +897,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_wrong_cl_http10_keepalive(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.0\n" "Content-Length: 5\n" "Connection: Keep-Alive\n\n"
+ to_send = "GET / HTTP/1.0\r\nContent-Length: 5\r\nConnection: Keep-Alive\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -893,7 +918,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_no_cl_http10(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.0\n\n"
+ to_send = "GET / HTTP/1.0\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -909,7 +934,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_no_cl_http10_keepalive(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.0\nConnection: Keep-Alive\n\n"
+ to_send = "GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -932,7 +957,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_wrong_cl_http11(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.1\n" "Content-Length: 5\n\n"
+ to_send = "GET / HTTP/1.1\r\nContent-Length: 5\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -954,7 +979,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_wrong_cl_http11_connclose(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.1\nContent-Length: 5\nConnection: close\n\n"
+ to_send = "GET / HTTP/1.1\r\nContent-Length: 5\r\nConnection: close\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -971,7 +996,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_no_cl_http11(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.1\n\n"
+ to_send = "GET / HTTP/1.1\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -996,7 +1021,7 @@ class TooLargeTests(object):
def test_request_body_too_large_with_no_cl_http11_connclose(self):
body = "a" * self.toobig
- to_send = "GET / HTTP/1.1\nConnection: close\n\n"
+ to_send = "GET / HTTP/1.1\r\nConnection: close\r\n\r\n"
to_send += body
to_send = tobytes(to_send)
self.connect()
@@ -1014,7 +1039,7 @@ class TooLargeTests(object):
def test_request_body_too_large_chunked_encoding(self):
control_line = "20;\r\n" # 20 hex = 32 dec
s = "This string has 32 characters.\r\n"
- to_send = "GET / HTTP/1.1\nTransfer-Encoding: chunked\n\n"
+ to_send = "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"
repeat = control_line + s
to_send += repeat * ((self.toobig // len(repeat)) + 1)
to_send = tobytes(to_send)
@@ -1042,7 +1067,7 @@ class InternalServerErrorTests(object):
self.stop_subprocess()
def test_before_start_response_http_10(self):
- to_send = "GET /before_start_response HTTP/1.0\n\n"
+ to_send = "GET /before_start_response HTTP/1.0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -1058,7 +1083,7 @@ class InternalServerErrorTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_before_start_response_http_11(self):
- to_send = "GET /before_start_response HTTP/1.1\n\n"
+ to_send = "GET /before_start_response HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -1069,7 +1094,7 @@ class InternalServerErrorTests(object):
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(
- sorted(headers.keys()), ["content-length", "content-type", "date", "server"]
+ sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"]
)
# connection has been closed
self.send_check_error(to_send)
@@ -1077,7 +1102,7 @@ class InternalServerErrorTests(object):
def test_before_start_response_http_11_close(self):
to_send = tobytes(
- "GET /before_start_response HTTP/1.1\n" "Connection: close\n\n"
+ "GET /before_start_response HTTP/1.1\r\nConnection: close\r\n\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -1097,7 +1122,7 @@ class InternalServerErrorTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_after_start_response_http10(self):
- to_send = "GET /after_start_response HTTP/1.0\n\n"
+ to_send = "GET /after_start_response HTTP/1.0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -1117,7 +1142,7 @@ class InternalServerErrorTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_after_start_response_http11(self):
- to_send = "GET /after_start_response HTTP/1.1\n\n"
+ to_send = "GET /after_start_response HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -1128,7 +1153,7 @@ class InternalServerErrorTests(object):
self.assertEqual(cl, len(response_body))
self.assertTrue(response_body.startswith(b"Internal Server Error"))
self.assertEqual(
- sorted(headers.keys()), ["content-length", "content-type", "date", "server"]
+ sorted(headers.keys()), ["connection", "content-length", "content-type", "date", "server"]
)
# connection has been closed
self.send_check_error(to_send)
@@ -1136,7 +1161,7 @@ class InternalServerErrorTests(object):
def test_after_start_response_http11_close(self):
to_send = tobytes(
- "GET /after_start_response HTTP/1.1\n" "Connection: close\n\n"
+ "GET /after_start_response HTTP/1.1\r\nConnection: close\r\n\r\n"
)
self.connect()
self.sock.send(to_send)
@@ -1156,7 +1181,7 @@ class InternalServerErrorTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_after_write_cb(self):
- to_send = "GET /after_write_cb HTTP/1.1\n\n"
+ to_send = "GET /after_write_cb HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -1169,7 +1194,7 @@ class InternalServerErrorTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_in_generator(self):
- to_send = "GET /in_generator HTTP/1.1\n\n"
+ to_send = "GET /in_generator HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
self.sock.send(to_send)
@@ -1192,7 +1217,7 @@ class FileWrapperTests(object):
self.stop_subprocess()
def test_filelike_http11(self):
- to_send = "GET /filelike HTTP/1.1\n\n"
+ to_send = "GET /filelike HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1210,7 +1235,7 @@ class FileWrapperTests(object):
# connection has not been closed
def test_filelike_nocl_http11(self):
- to_send = "GET /filelike_nocl HTTP/1.1\n\n"
+ to_send = "GET /filelike_nocl HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1228,7 +1253,7 @@ class FileWrapperTests(object):
# connection has not been closed
def test_filelike_shortcl_http11(self):
- to_send = "GET /filelike_shortcl HTTP/1.1\n\n"
+ to_send = "GET /filelike_shortcl HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1247,7 +1272,7 @@ class FileWrapperTests(object):
# connection has not been closed
def test_filelike_longcl_http11(self):
- to_send = "GET /filelike_longcl HTTP/1.1\n\n"
+ to_send = "GET /filelike_longcl HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1265,7 +1290,7 @@ class FileWrapperTests(object):
# connection has not been closed
def test_notfilelike_http11(self):
- to_send = "GET /notfilelike HTTP/1.1\n\n"
+ to_send = "GET /notfilelike HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1283,7 +1308,7 @@ class FileWrapperTests(object):
# connection has not been closed
def test_notfilelike_iobase_http11(self):
- to_send = "GET /notfilelike_iobase HTTP/1.1\n\n"
+ to_send = "GET /notfilelike_iobase HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1301,7 +1326,7 @@ class FileWrapperTests(object):
# connection has not been closed
def test_notfilelike_nocl_http11(self):
- to_send = "GET /notfilelike_nocl HTTP/1.1\n\n"
+ to_send = "GET /notfilelike_nocl HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1318,7 +1343,7 @@ class FileWrapperTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_notfilelike_shortcl_http11(self):
- to_send = "GET /notfilelike_shortcl HTTP/1.1\n\n"
+ to_send = "GET /notfilelike_shortcl HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1337,7 +1362,7 @@ class FileWrapperTests(object):
# connection has not been closed
def test_notfilelike_longcl_http11(self):
- to_send = "GET /notfilelike_longcl HTTP/1.1\n\n"
+ to_send = "GET /notfilelike_longcl HTTP/1.1\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1356,7 +1381,7 @@ class FileWrapperTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_filelike_http10(self):
- to_send = "GET /filelike HTTP/1.0\n\n"
+ to_send = "GET /filelike HTTP/1.0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1375,7 +1400,7 @@ class FileWrapperTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_filelike_nocl_http10(self):
- to_send = "GET /filelike_nocl HTTP/1.0\n\n"
+ to_send = "GET /filelike_nocl HTTP/1.0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1394,7 +1419,7 @@ class FileWrapperTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_notfilelike_http10(self):
- to_send = "GET /notfilelike HTTP/1.0\n\n"
+ to_send = "GET /notfilelike HTTP/1.0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1413,7 +1438,7 @@ class FileWrapperTests(object):
self.assertRaises(ConnectionClosed, read_http, fp)
def test_notfilelike_nocl_http10(self):
- to_send = "GET /notfilelike_nocl HTTP/1.0\n\n"
+ to_send = "GET /notfilelike_nocl HTTP/1.0\r\n\r\n"
to_send = tobytes(to_send)
self.connect()
@@ -1582,7 +1607,7 @@ def read_http(fp): # pragma: no cover
header_lines = []
while True:
line = fp.readline()
- if line in (b"\r\n", b"\n", b""):
+ if line in (b"\r\n", b"\r\n", b""):
break
else:
header_lines.append(line)
diff --git a/waitress/tests/test_parser.py b/waitress/tests/test_parser.py
index 2e81981..1a95e23 100644
--- a/waitress/tests/test_parser.py
+++ b/waitress/tests/test_parser.py
@@ -15,10 +15,7 @@
"""
import unittest
-from waitress.compat import (
- text_,
- tobytes,
-)
+from waitress.compat import text_, tobytes
class TestHTTPRequestParser(unittest.TestCase):
@@ -40,47 +37,48 @@ class TestHTTPRequestParser(unittest.TestCase):
result = self.parser.get_body_stream()
self.assertEqual(result, body_rcv)
- def test_received_nonsense_with_double_cr(self):
- data = b"""\
-HTTP/1.0 GET /foobar
-
-
-"""
+ def test_received_get_no_headers(self):
+ data = b"HTTP/1.0 GET /foobar\r\n\r\n"
result = self.parser.received(data)
- self.assertEqual(result, 22)
+ self.assertEqual(result, 24)
self.assertTrue(self.parser.completed)
self.assertEqual(self.parser.headers, {})
def test_received_bad_host_header(self):
from waitress.utilities import BadRequest
- data = b"""\
-HTTP/1.0 GET /foobar
- Host: foo
-
-
-"""
+ data = b"HTTP/1.0 GET /foobar\r\n Host: foo\r\n\r\n"
result = self.parser.received(data)
- self.assertEqual(result, 33)
+ self.assertEqual(result, 36)
self.assertTrue(self.parser.completed)
self.assertEqual(self.parser.error.__class__, BadRequest)
- def test_received_nonsense_nothing(self):
- data = b"""\
-
+ def test_received_bad_transfer_encoding(self):
+ from waitress.utilities import ServerNotImplemented
+ data = (
+ b"GET /foobar HTTP/1.1\r\n"
+ b"Transfer-Encoding: foo\r\n"
+ b"\r\n"
+ b"1d;\r\n"
+ b"This string has 29 characters\r\n"
+ b"0\r\n\r\n"
+ )
+ result = self.parser.received(data)
+ self.assertEqual(result, 48)
+ self.assertTrue(self.parser.completed)
+ self.assertEqual(self.parser.error.__class__, ServerNotImplemented)
-"""
+ def test_received_nonsense_nothing(self):
+ data = b"\r\n\r\n"
result = self.parser.received(data)
- self.assertEqual(result, 2)
+ self.assertEqual(result, 4)
self.assertTrue(self.parser.completed)
self.assertEqual(self.parser.headers, {})
def test_received_no_doublecr(self):
- data = b"""\
-GET /foobar HTTP/8.4
-"""
+ data = b"GET /foobar HTTP/8.4\r\n"
result = self.parser.received(data)
- self.assertEqual(result, 21)
+ self.assertEqual(result, 22)
self.assertFalse(self.parser.completed)
self.assertEqual(self.parser.headers, {})
@@ -93,13 +91,9 @@ GET /foobar HTTP/8.4
from waitress.utilities import RequestEntityTooLarge
self.parser.adj.max_request_body_size = 2
- data = b"""\
-GET /foobar HTTP/8.4
-Content-Length: 10
-
-"""
+ data = b"GET /foobar HTTP/8.4\r\nContent-Length: 10\r\n\r\n"
result = self.parser.received(data)
- self.assertEqual(result, 41)
+ self.assertEqual(result, 44)
self.assertTrue(self.parser.completed)
self.assertTrue(isinstance(self.parser.error, RequestEntityTooLarge))
@@ -107,12 +101,9 @@ Content-Length: 10
from waitress.utilities import RequestHeaderFieldsTooLarge
self.parser.adj.max_request_header_size = 2
- data = b"""\
-GET /foobar HTTP/8.4
-X-Foo: 1
-"""
+ data = b"GET /foobar HTTP/8.4\r\nX-Foo: 1\r\n\r\n"
result = self.parser.received(data)
- self.assertEqual(result, 30)
+ self.assertEqual(result, 34)
self.assertTrue(self.parser.completed)
self.assertTrue(isinstance(self.parser.error, RequestHeaderFieldsTooLarge))
@@ -120,16 +111,18 @@ X-Foo: 1
from waitress.utilities import RequestEntityTooLarge
self.parser.adj.max_request_body_size = 2
- data = b"""\
-GET /foobar HTTP/1.1
-Transfer-Encoding: chunked
-X-Foo: 1
-
-20;\r\n
-This string has 32 characters\r\n
-0\r\n\r\n"""
+ data = (
+ b"GET /foobar HTTP/1.1\r\n"
+ b"Transfer-Encoding: chunked\r\n"
+ b"X-Foo: 1\r\n"
+ b"\r\n"
+ b"1d;\r\n"
+ b"This string has 29 characters\r\n"
+ b"0\r\n\r\n"
+ )
+
result = self.parser.received(data)
- self.assertEqual(result, 58)
+ self.assertEqual(result, 62)
self.parser.received(data[result:])
self.assertTrue(self.parser.completed)
self.assertTrue(isinstance(self.parser.error, RequestEntityTooLarge))
@@ -137,69 +130,119 @@ This string has 32 characters\r\n
def test_received_error_from_parser(self):
from waitress.utilities import BadRequest
- data = b"""\
-GET /foobar HTTP/1.1
-Transfer-Encoding: chunked
-X-Foo: 1
-
-garbage
-"""
+ data = (
+ b"GET /foobar HTTP/1.1\r\n"
+ b"Transfer-Encoding: chunked\r\n"
+ b"X-Foo: 1\r\n"
+ b"\r\n"
+ b"garbage\r\n"
+ )
# header
result = self.parser.received(data)
# body
result = self.parser.received(data[result:])
- self.assertEqual(result, 8)
+ self.assertEqual(result, 9)
self.assertTrue(self.parser.completed)
self.assertTrue(isinstance(self.parser.error, BadRequest))
def test_received_chunked_completed_sets_content_length(self):
- data = b"""\
-GET /foobar HTTP/1.1
-Transfer-Encoding: chunked
-X-Foo: 1
-
-20;\r\n
-This string has 32 characters\r\n
-0\r\n\r\n"""
+ data = (
+ b"GET /foobar HTTP/1.1\r\n"
+ b"Transfer-Encoding: chunked\r\n"
+ b"X-Foo: 1\r\n"
+ b"\r\n"
+ b"1d;\r\n"
+ b"This string has 29 characters\r\n"
+ b"0\r\n\r\n"
+ )
result = self.parser.received(data)
- self.assertEqual(result, 58)
+ self.assertEqual(result, 62)
data = data[result:]
result = self.parser.received(data)
self.assertTrue(self.parser.completed)
self.assertTrue(self.parser.error is None)
- self.assertEqual(self.parser.headers["CONTENT_LENGTH"], "32")
+ self.assertEqual(self.parser.headers["CONTENT_LENGTH"], "29")
def test_parse_header_gardenpath(self):
- data = b"""\
-GET /foobar HTTP/8.4
-foo: bar"""
+ data = b"GET /foobar HTTP/8.4\r\nfoo: bar\r\n"
self.parser.parse_header(data)
self.assertEqual(self.parser.first_line, b"GET /foobar HTTP/8.4")
self.assertEqual(self.parser.headers["FOO"], "bar")
def test_parse_header_no_cr_in_headerplus(self):
+ from waitress.parser import ParsingError
+
data = b"GET /foobar HTTP/8.4"
- self.parser.parse_header(data)
- self.assertEqual(self.parser.first_line, data)
+
+ try:
+ self.parser.parse_header(data)
+ except ParsingError:
+ pass
+ else: # pragma: nocover
+ self.assertTrue(False)
def test_parse_header_bad_content_length(self):
- data = b"GET /foobar HTTP/8.4\ncontent-length: abc"
- self.parser.parse_header(data)
- self.assertEqual(self.parser.body_rcv, None)
+ from waitress.parser import ParsingError
+
+ data = b"GET /foobar HTTP/8.4\r\ncontent-length: abc\r\n"
+
+ try:
+ self.parser.parse_header(data)
+ except ParsingError as e:
+ self.assertIn("Content-Length is invalid", e.args[0])
+ else: # pragma: nocover
+ self.assertTrue(False)
+
+ def test_parse_header_multiple_content_length(self):
+ from waitress.parser import ParsingError
+
+ data = b"GET /foobar HTTP/8.4\r\ncontent-length: 10\r\ncontent-length: 20\r\n"
+
+ try:
+ self.parser.parse_header(data)
+ except ParsingError as e:
+ self.assertIn("Content-Length is invalid", e.args[0])
+ else: # pragma: nocover
+ self.assertTrue(False)
def test_parse_header_11_te_chunked(self):
# NB: test that capitalization of header value is unimportant
- data = b"GET /foobar HTTP/1.1\ntransfer-encoding: ChUnKed"
+ data = b"GET /foobar HTTP/1.1\r\ntransfer-encoding: ChUnKed\r\n"
self.parser.parse_header(data)
self.assertEqual(self.parser.body_rcv.__class__.__name__, "ChunkedReceiver")
+
+ def test_parse_header_transfer_encoding_invalid(self):
+ from waitress.parser import TransferEncodingNotImplemented
+
+ data = b"GET /foobar HTTP/1.1\r\ntransfer-encoding: gzip\r\n"
+
+ try:
+ self.parser.parse_header(data)
+ except TransferEncodingNotImplemented as e:
+ self.assertIn("Transfer-Encoding requested is not supported.", e.args[0])
+ else: # pragma: nocover
+ self.assertTrue(False)
+
+ def test_parse_header_transfer_encoding_invalid_multiple(self):
+ from waitress.parser import TransferEncodingNotImplemented
+
+ data = b"GET /foobar HTTP/1.1\r\ntransfer-encoding: gzip\r\ntransfer-encoding: chunked\r\n"
+
+ try:
+ self.parser.parse_header(data)
+ except TransferEncodingNotImplemented as e:
+ self.assertIn("Transfer-Encoding requested is not supported.", e.args[0])
+ else: # pragma: nocover
+ self.assertTrue(False)
+
def test_parse_header_11_expect_continue(self):
- data = b"GET /foobar HTTP/1.1\nexpect: 100-continue"
+ data = b"GET /foobar HTTP/1.1\r\nexpect: 100-continue\r\n"
self.parser.parse_header(data)
self.assertEqual(self.parser.expect_continue, True)
def test_parse_header_connection_close(self):
- data = b"GET /foobar HTTP/1.1\nConnection: close\n\n"
+ data = b"GET /foobar HTTP/1.1\r\nConnection: close\r\n"
self.parser.parse_header(data)
self.assertEqual(self.parser.connection_close, True)
@@ -213,6 +256,62 @@ foo: bar"""
self.parser.body_rcv = None
self.parser.close() # doesn't raise
+ def test_parse_header_lf_only(self):
+ from waitress.parser import ParsingError
+
+ data = b"GET /foobar HTTP/8.4\nfoo: bar"
+
+ try:
+ self.parser.parse_header(data)
+ except ParsingError:
+ pass
+ else: # pragma: nocover
+ self.assertTrue(False)
+
+ def test_parse_header_cr_only(self):
+ from waitress.parser import ParsingError
+
+ data = b"GET /foobar HTTP/8.4\rfoo: bar"
+ try:
+ self.parser.parse_header(data)
+ except ParsingError:
+ pass
+ else: # pragma: nocover
+ self.assertTrue(False)
+
+ def test_parse_header_extra_lf_in_header(self):
+ from waitress.parser import ParsingError
+
+ data = b"GET /foobar HTTP/8.4\r\nfoo: \nbar\r\n"
+ try:
+ self.parser.parse_header(data)
+ except ParsingError as e:
+ self.assertIn("Bare CR or LF found in header line", e.args[0])
+ else: # pragma: nocover
+ self.assertTrue(False)
+
+ def test_parse_header_extra_lf_in_first_line(self):
+ from waitress.parser import ParsingError
+
+ data = b"GET /foobar\n HTTP/8.4\r\n"
+ try:
+ self.parser.parse_header(data)
+ except ParsingError as e:
+ self.assertIn("Bare CR or LF found in HTTP message", e.args[0])
+ else: # pragma: nocover
+ self.assertTrue(False)
+
+ def test_parse_header_invalid_whitespace(self):
+ from waitress.parser import ParsingError
+
+ data = b"GET /foobar HTTP/8.4\r\nfoo : bar\r\n"
+ try:
+ self.parser.parse_header(data)
+ except ParsingError as e:
+ self.assertIn("Invalid whitespace after field-name", e.args[0])
+ else: # pragma: nocover
+ self.assertTrue(False)
+
class Test_split_uri(unittest.TestCase):
def _callFUT(self, uri):
@@ -298,7 +397,7 @@ class Test_get_header_lines(unittest.TestCase):
return get_header_lines(data)
def test_get_header_lines(self):
- result = self._callFUT(b"slam\nslim")
+ result = self._callFUT(b"slam\r\nslim")
self.assertEqual(result, [b"slam", b"slim"])
def test_get_header_lines_folded(self):
@@ -310,11 +409,11 @@ class Test_get_header_lines(unittest.TestCase):
# interpreting the field value or forwarding the message downstream.
# We are just preserving the whitespace that indicates folding.
- result = self._callFUT(b"slim\n slam")
+ result = self._callFUT(b"slim\r\n slam")
self.assertEqual(result, [b"slim slam"])
def test_get_header_lines_tabbed(self):
- result = self._callFUT(b"slam\n\tslim")
+ result = self._callFUT(b"slam\r\n\tslim")
self.assertEqual(result, [b"slam\tslim"])
def test_get_header_lines_malformed(self):
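The folding tests above capture the helper's contract: header lines are split on CRLF only, and an obs-folded continuation line is merged back onto the previous line with its leading whitespace preserved rather than interpreted. A minimal sketch of that behaviour, assuming the helper is imported from waitress.parser the same way the test fixture uses it:

    from waitress.parser import get_header_lines

    # CRLF is now the only line separator the splitter recognizes.
    print(get_header_lines(b"slam\r\nslim"))    # [b'slam', b'slim']

    # An obs-fold continuation is glued onto the previous line, whitespace intact.
    print(get_header_lines(b"slim\r\n slam"))   # [b'slim slam']
    print(get_header_lines(b"slam\r\n\tslim"))  # [b'slam\tslim']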
@@ -361,22 +460,24 @@ class TestHTTPRequestParserIntegration(unittest.TestCase):
def feed(self, data):
parser = self.parser
+
for n in range(100): # make sure we never loop forever
consumed = parser.received(data)
data = data[consumed:]
+
if parser.completed:
return
raise ValueError("Looping") # pragma: no cover
def testSimpleGET(self):
- data = b"""\
-GET /foobar HTTP/8.4
-FirstName: mickey
-lastname: Mouse
-content-length: 7
-
-Hello.
-"""
+ data = (
+ b"GET /foobar HTTP/8.4\r\n"
+ b"FirstName: mickey\r\n"
+ b"lastname: Mouse\r\n"
+ b"content-length: 6\r\n"
+ b"\r\n"
+ b"Hello."
+ )
parser = self.parser
self.feed(data)
self.assertTrue(parser.completed)
@@ -384,24 +485,24 @@ Hello.
self.assertFalse(parser.empty)
self.assertEqual(
parser.headers,
- {"FIRSTNAME": "mickey", "LASTNAME": "Mouse", "CONTENT_LENGTH": "7",},
+ {"FIRSTNAME": "mickey", "LASTNAME": "Mouse", "CONTENT_LENGTH": "6",},
)
self.assertEqual(parser.path, "/foobar")
self.assertEqual(parser.command, "GET")
self.assertEqual(parser.query, "")
self.assertEqual(parser.proxy_scheme, "")
self.assertEqual(parser.proxy_netloc, "")
- self.assertEqual(parser.get_body_stream().getvalue(), b"Hello.\n")
+ self.assertEqual(parser.get_body_stream().getvalue(), b"Hello.")
def testComplexGET(self):
- data = b"""\
-GET /foo/a+%2B%2F%C3%A4%3D%26a%3Aint?d=b+%2B%2F%3D%26b%3Aint&c+%2B%2F%3D%26c%3Aint=6 HTTP/8.4
-FirstName: mickey
-lastname: Mouse
-content-length: 10
-
-Hello mickey.
-"""
+ data = (
+ b"GET /foo/a+%2B%2F%C3%A4%3D%26a%3Aint?d=b+%2B%2F%3D%26b%3Aint&c+%2B%2F%3D%26c%3Aint=6 HTTP/8.4\r\n"
+ b"FirstName: mickey\r\n"
+ b"lastname: Mouse\r\n"
+ b"content-length: 10\r\n"
+ b"\r\n"
+ b"Hello mickey."
+ )
parser = self.parser
self.feed(data)
self.assertEqual(parser.command, "GET")
@@ -409,7 +510,7 @@ Hello mickey.
self.assertFalse(parser.empty)
self.assertEqual(
parser.headers,
- {"FIRSTNAME": "mickey", "LASTNAME": "Mouse", "CONTENT_LENGTH": "10",},
+ {"FIRSTNAME": "mickey", "LASTNAME": "Mouse", "CONTENT_LENGTH": "10"},
)
# path should be utf-8 encoded
self.assertEqual(
@@ -422,59 +523,59 @@ Hello mickey.
self.assertEqual(parser.get_body_stream().getvalue(), b"Hello mick")
def testProxyGET(self):
- data = b"""\
-GET https://example.com:8080/foobar HTTP/8.4
-content-length: 7
-
-Hello.
-"""
+ data = (
+ b"GET https://example.com:8080/foobar HTTP/8.4\r\n"
+ b"content-length: 6\r\n"
+ b"\r\n"
+ b"Hello."
+ )
parser = self.parser
self.feed(data)
self.assertTrue(parser.completed)
self.assertEqual(parser.version, "8.4")
self.assertFalse(parser.empty)
- self.assertEqual(parser.headers, {"CONTENT_LENGTH": "7",})
+ self.assertEqual(parser.headers, {"CONTENT_LENGTH": "6"})
self.assertEqual(parser.path, "/foobar")
self.assertEqual(parser.command, "GET")
self.assertEqual(parser.proxy_scheme, "https")
self.assertEqual(parser.proxy_netloc, "example.com:8080")
self.assertEqual(parser.command, "GET")
self.assertEqual(parser.query, "")
- self.assertEqual(parser.get_body_stream().getvalue(), b"Hello.\n")
+ self.assertEqual(parser.get_body_stream().getvalue(), b"Hello.")
def testDuplicateHeaders(self):
# Ensure that headers with the same key get concatenated as per
# RFC2616.
- data = b"""\
-GET /foobar HTTP/8.4
-x-forwarded-for: 10.11.12.13
-x-forwarded-for: unknown,127.0.0.1
-X-Forwarded_for: 255.255.255.255
-content-length: 7
-
-Hello.
-"""
+ data = (
+ b"GET /foobar HTTP/8.4\r\n"
+ b"x-forwarded-for: 10.11.12.13\r\n"
+ b"x-forwarded-for: unknown,127.0.0.1\r\n"
+ b"X-Forwarded_for: 255.255.255.255\r\n"
+ b"content-length: 6\r\n"
+ b"\r\n"
+ b"Hello."
+ )
self.feed(data)
self.assertTrue(self.parser.completed)
self.assertEqual(
self.parser.headers,
{
- "CONTENT_LENGTH": "7",
+ "CONTENT_LENGTH": "6",
"X_FORWARDED_FOR": "10.11.12.13, unknown,127.0.0.1",
},
)
def testSpoofedHeadersDropped(self):
- data = b"""\
-GET /foobar HTTP/8.4
-x-auth_user: bob
-content-length: 7
-
-Hello.
-"""
+ data = (
+ b"GET /foobar HTTP/8.4\r\n"
+ b"x-auth_user: bob\r\n"
+ b"content-length: 6\r\n"
+ b"\r\n"
+ b"Hello."
+ )
self.feed(data)
self.assertTrue(self.parser.completed)
- self.assertEqual(self.parser.headers, {"CONTENT_LENGTH": "7",})
+ self.assertEqual(self.parser.headers, {"CONTENT_LENGTH": "6",})
class DummyBodyStream(object):
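Taken together, the parser tests above require CRLF line endings throughout the request head: bare CR or LF is rejected with a ParsingError instead of being silently accepted. A rough sketch of exercising that directly; constructing the parser with a default Adjustments instance is an assumption based on the test fixtures, not something this diff shows:

    from waitress.adjustments import Adjustments
    from waitress.parser import HTTPRequestParser, ParsingError

    parser = HTTPRequestParser(Adjustments())
    try:
        # Bare LF line endings are now rejected outright.
        parser.parse_header(b"GET / HTTP/1.1\nHost: example.com")
    except ParsingError as exc:
        print("rejected:", exc.args[0])

    # A well-formed head uses \r\n throughout and parses as before.
    parser = HTTPRequestParser(Adjustments())
    parser.parse_header(b"GET / HTTP/1.1\r\nHost: example.com\r\n")
    print(parser.headers["HOST"])  # example.com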
diff --git a/waitress/tests/test_receiver.py b/waitress/tests/test_receiver.py
index fcd0305..b4910bb 100644
--- a/waitress/tests/test_receiver.py
+++ b/waitress/tests/test_receiver.py
@@ -83,27 +83,27 @@ class TestChunkedReceiver(unittest.TestCase):
def test_received_control_line_finished_garbage_in_input(self):
buf = DummyBuffer()
inst = self._makeOne(buf)
- result = inst.received(b"garbage\n")
- self.assertEqual(result, 8)
+ result = inst.received(b"garbage\r\n")
+ self.assertEqual(result, 9)
self.assertTrue(inst.error)
def test_received_control_line_finished_all_chunks_not_received(self):
buf = DummyBuffer()
inst = self._makeOne(buf)
- result = inst.received(b"a;discard\n")
+ result = inst.received(b"a;discard\r\n")
self.assertEqual(inst.control_line, b"")
self.assertEqual(inst.chunk_remainder, 10)
self.assertEqual(inst.all_chunks_received, False)
- self.assertEqual(result, 10)
+ self.assertEqual(result, 11)
self.assertEqual(inst.completed, False)
def test_received_control_line_finished_all_chunks_received(self):
buf = DummyBuffer()
inst = self._makeOne(buf)
- result = inst.received(b"0;discard\n")
+ result = inst.received(b"0;discard\r\n")
self.assertEqual(inst.control_line, b"")
self.assertEqual(inst.all_chunks_received, True)
- self.assertEqual(result, 10)
+ self.assertEqual(result, 11)
self.assertEqual(inst.completed, False)
def test_received_trailer_startswith_crlf(self):
@@ -120,7 +120,7 @@ class TestChunkedReceiver(unittest.TestCase):
inst.all_chunks_received = True
result = inst.received(b"\n")
self.assertEqual(result, 1)
- self.assertEqual(inst.completed, True)
+ self.assertEqual(inst.completed, False)
def test_received_trailer_not_finished(self):
buf = DummyBuffer()
@@ -154,6 +154,77 @@ class TestChunkedReceiver(unittest.TestCase):
inst = self._makeOne(buf)
self.assertEqual(inst.__len__(), 2)
+ def test_received_chunk_is_properly_terminated(self):
+ buf = DummyBuffer()
+ inst = self._makeOne(buf)
+ data = b"4\r\nWiki\r\n"
+ result = inst.received(data)
+ self.assertEqual(result, len(data))
+ self.assertEqual(inst.completed, False)
+ self.assertEqual(buf.data[0], b"Wiki")
+
+ def test_received_chunk_not_properly_terminated(self):
+ from waitress.utilities import BadRequest
+
+ buf = DummyBuffer()
+ inst = self._makeOne(buf)
+ data = b"4\r\nWikibadchunk\r\n"
+ result = inst.received(data)
+ self.assertEqual(result, len(data))
+ self.assertEqual(inst.completed, False)
+ self.assertEqual(buf.data[0], b"Wiki")
+ self.assertEqual(inst.error.__class__, BadRequest)
+
+ def test_received_multiple_chunks(self):
+ from waitress.utilities import BadRequest
+
+ buf = DummyBuffer()
+ inst = self._makeOne(buf)
+ data = (
+ b"4\r\n"
+ b"Wiki\r\n"
+ b"5\r\n"
+ b"pedia\r\n"
+ b"E\r\n"
+ b" in\r\n"
+ b"\r\n"
+ b"chunks.\r\n"
+ b"0\r\n"
+ b"\r\n"
+ )
+ result = inst.received(data)
+ self.assertEqual(result, len(data))
+ self.assertEqual(inst.completed, True)
+ self.assertEqual(b"".join(buf.data), b"Wikipedia in\r\n\r\nchunks.")
+ self.assertEqual(inst.error, None)
+
+ def test_received_multiple_chunks_split(self):
+ from waitress.utilities import BadRequest
+
+ buf = DummyBuffer()
+ inst = self._makeOne(buf)
+ data1 = b"4\r\nWiki\r"
+ result = inst.received(data1)
+ self.assertEqual(result, len(data1))
+
+ data2 = (
+ b"\n5\r\n"
+ b"pedia\r\n"
+ b"E\r\n"
+ b" in\r\n"
+ b"\r\n"
+ b"chunks.\r\n"
+ b"0\r\n"
+ b"\r\n"
+ )
+
+ result = inst.received(data2)
+ self.assertEqual(result, len(data2))
+
+ self.assertEqual(inst.completed, True)
+ self.assertEqual(b"".join(buf.data), b"Wikipedia in\r\n\r\nchunks.")
+ self.assertEqual(inst.error, None)
+
class DummyBuffer(object):
def __init__(self, data=None):
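The new receiver tests boil down to two rules: every chunk body must be terminated by exactly \r\n, and only the final 0-size chunk followed by a blank trailer line completes the request. A small illustration, assuming ChunkedReceiver can be wrapped around any object with an append() method, as the DummyBuffer fixture suggests:

    from waitress.receiver import ChunkedReceiver

    class ListBuffer(object):
        # Tiny stand-in for the test's DummyBuffer: just collects chunk data.
        def __init__(self):
            self.data = []

        def append(self, s):
            self.data.append(s)

    buf = ListBuffer()
    receiver = ChunkedReceiver(buf)

    # A well-formed chunk ("4\r\nWiki\r\n") plus the terminating 0-chunk completes cleanly.
    receiver.received(b"4\r\nWiki\r\n0\r\n\r\n")
    print(receiver.completed, receiver.error, b"".join(buf.data))  # True None b'Wiki'

    # A chunk body not followed by \r\n is now flagged as a BadRequest.
    bad = ChunkedReceiver(ListBuffer())
    bad.received(b"4\r\nWikibadchunk\r\n")
    print(bad.error.__class__.__name__)  # BadRequest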
diff --git a/waitress/tests/test_task.py b/waitress/tests/test_task.py
index 584add1..1a86245 100644
--- a/waitress/tests/test_task.py
+++ b/waitress/tests/test_task.py
@@ -876,15 +876,16 @@ class TestErrorTask(unittest.TestCase):
inst.version = "1.1"
inst.execute()
lines = filter_lines(inst.channel.written)
- self.assertEqual(len(lines), 8)
+ self.assertEqual(len(lines), 9)
self.assertEqual(lines[0], b"HTTP/1.1 432 Too Ugly")
- self.assertEqual(lines[1], b"Content-Length: 43")
- self.assertEqual(lines[2], b"Content-Type: text/plain")
- self.assertTrue(lines[3])
- self.assertEqual(lines[4], b"Server: waitress")
- self.assertEqual(lines[5], b"Too Ugly")
- self.assertEqual(lines[6], b"body")
- self.assertEqual(lines[7], b"(generated by waitress)")
+ self.assertEqual(lines[1], b"Connection: close")
+ self.assertEqual(lines[2], b"Content-Length: 43")
+ self.assertEqual(lines[3], b"Content-Type: text/plain")
+ self.assertTrue(lines[4])
+ self.assertEqual(lines[5], b"Server: waitress")
+ self.assertEqual(lines[6], b"Too Ugly")
+ self.assertEqual(lines[7], b"body")
+ self.assertEqual(lines[8], b"(generated by waitress)")
def test_execute_http_11_close(self):
inst = self._makeOne()
@@ -903,21 +904,22 @@ class TestErrorTask(unittest.TestCase):
self.assertEqual(lines[7], b"body")
self.assertEqual(lines[8], b"(generated by waitress)")
- def test_execute_http_11_keep(self):
+ def test_execute_http_11_keep_forces_close(self):
inst = self._makeOne()
inst.version = "1.1"
inst.request.headers["CONNECTION"] = "keep-alive"
inst.execute()
lines = filter_lines(inst.channel.written)
- self.assertEqual(len(lines), 8)
+ self.assertEqual(len(lines), 9)
self.assertEqual(lines[0], b"HTTP/1.1 432 Too Ugly")
- self.assertEqual(lines[1], b"Content-Length: 43")
- self.assertEqual(lines[2], b"Content-Type: text/plain")
- self.assertTrue(lines[3])
- self.assertEqual(lines[4], b"Server: waitress")
- self.assertEqual(lines[5], b"Too Ugly")
- self.assertEqual(lines[6], b"body")
- self.assertEqual(lines[7], b"(generated by waitress)")
+ self.assertEqual(lines[1], b"Connection: close")
+ self.assertEqual(lines[2], b"Content-Length: 43")
+ self.assertEqual(lines[3], b"Content-Type: text/plain")
+ self.assertTrue(lines[4])
+ self.assertEqual(lines[5], b"Server: waitress")
+ self.assertEqual(lines[6], b"Too Ugly")
+ self.assertEqual(lines[7], b"body")
+ self.assertEqual(lines[8], b"(generated by waitress)")
class DummyTask(object):
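The reworked assertions pin down the wire format of a generated error response: even when the client asked for keep-alive, the error task now advertises Connection: close ahead of the usual Content-Length/Content-Type/Server headers. Roughly, the head of such a response looks like the following; the status line and header ordering are taken from the assertions above, and the unnamed header the test only checks for truthiness is presumably Date:

    expected_head = (
        b"HTTP/1.1 432 Too Ugly\r\n"   # status line comes from the dummy task fixture
        b"Connection: close\r\n"       # now always forced on error responses
        b"Content-Length: 43\r\n"
        b"Content-Type: text/plain\r\n"
        # ...a Date-like header and "Server: waitress" follow, then a blank line and
        # the body "Too Ugly\r\n\r\nbody\r\n\r\n(generated by waitress)" (43 bytes).
    )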
diff --git a/waitress/tests/test_utilities.py b/waitress/tests/test_utilities.py
index c5c01d9..15cd24f 100644
--- a/waitress/tests/test_utilities.py
+++ b/waitress/tests/test_utilities.py
@@ -83,7 +83,7 @@ class Test_find_double_newline(unittest.TestCase):
self.assertEqual(self._callFUT(b"\n"), -1)
def test_double_linefeed(self):
- self.assertEqual(self._callFUT(b"\n\n"), 2)
+ self.assertEqual(self._callFUT(b"\n\n"), -1)
def test_one_crlf(self):
self.assertEqual(self._callFUT(b"\r\n"), -1)
@@ -92,7 +92,7 @@ class Test_find_double_newline(unittest.TestCase):
self.assertEqual(self._callFUT(b"\r\n\r\n"), 4)
def test_mixed(self):
- self.assertEqual(self._callFUT(b"\n\n00\r\n\r\n"), 2)
+ self.assertEqual(self._callFUT(b"\n\n00\r\n\r\n"), 8)
class TestBadRequest(unittest.TestCase):
diff --git a/waitress/utilities.py b/waitress/utilities.py
index c77af49..0336897 100644
--- a/waitress/utilities.py
+++ b/waitress/utilities.py
@@ -28,20 +28,12 @@ queue_logger = logging.getLogger("waitress.queue")
def find_double_newline(s):
"""Returns the position just after a double newline in the given string."""
- pos1 = s.find(b"\n\r\n") # One kind of double newline
- if pos1 >= 0:
- pos1 += 3
- pos2 = s.find(b"\n\n") # Another kind of double newline
- if pos2 >= 0:
- pos2 += 2
-
- if pos1 >= 0:
- if pos2 >= 0:
- return min(pos1, pos2)
- else:
- return pos1
- else:
- return pos2
+ pos = s.find(b"\r\n\r\n")
+
+ if pos >= 0:
+ pos += 4
+
+ return pos
def concat(*args):
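In other words, the simplified helper recognizes only a true CRLF pair as the end of the header block; the mixed LF forms the old implementation tolerated now fall through. A minimal check, mirroring the updated Test_find_double_newline cases:

    from waitress.utilities import find_double_newline

    assert find_double_newline(b"\n\n") == -1           # bare LFs no longer match
    assert find_double_newline(b"\r\n\r\n") == 4        # position just past CRLFCRLF
    assert find_double_newline(b"\n\n00\r\n\r\n") == 8  # only the real CRLFCRLF counts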
@@ -271,6 +263,9 @@ def cleanup_unix_socket(path):
class Error(object):
+ code = 500
+ reason = "Internal Server Error"
+
def __init__(self, body):
self.body = body
@@ -280,6 +275,7 @@ class Error(object):
tag = "\r\n\r\n(generated by waitress)"
body = body + tag
headers = [("Content-Type", "text/plain")]
+
return status, headers, body
def wsgi_response(self, environ, start_response):
@@ -306,3 +302,8 @@ class RequestEntityTooLarge(BadRequest):
class InternalServerError(Error):
code = 500
reason = "Internal Server Error"
+
+
+class ServerNotImplemented(Error):
+ code = 501
+ reason = "Not Implemented"