Diffstat (limited to 'libgo/go/net/http/transfer.go')
-rw-r--r--  libgo/go/net/http/transfer.go  139
1 file changed, 64 insertions(+), 75 deletions(-)
diff --git a/libgo/go/net/http/transfer.go b/libgo/go/net/http/transfer.go
index 2e01a07f84f..50d434b1fb0 100644
--- a/libgo/go/net/http/transfer.go
+++ b/libgo/go/net/http/transfer.go
@@ -310,7 +310,7 @@ func (t *transferWriter) writeHeader(w io.Writer, trace *httptrace.ClientTrace)
k = CanonicalHeaderKey(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
- return &badStringError{"invalid Trailer key", k}
+ return badStringError("invalid Trailer key", k)
}
keys = append(keys, k)
}
@@ -335,7 +335,7 @@ func (t *transferWriter) writeBody(w io.Writer) error {
var ncopy int64
// Write body. We "unwrap" the body first if it was wrapped in a
- // nopCloser. This is to ensure that we can take advantage of
+ // nopCloser or readTrackingBody. This is to ensure that we can take advantage of
// OS-level optimizations in the event that the body is an
// *os.File.
if t.Body != nil {
@@ -413,7 +413,10 @@ func (t *transferWriter) unwrapBody() io.Reader {
if reflect.TypeOf(t.Body) == nopCloserType {
return reflect.ValueOf(t.Body).Field(0).Interface().(io.Reader)
}
-
+ if r, ok := t.Body.(*readTrackingBody); ok {
+ r.didRead = true
+ return r.ReadCloser
+ }
return t.Body
}
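
Note: the unwrapping above exists so that io.Copy further down can reach the underlying reader directly and use OS-level optimizations (e.g. sendfile when the body is an *os.File). The readTrackingBody type is defined elsewhere in the package and is not part of this diff; the sketch below is only a guess at its shape, inferred from the r.didRead and r.ReadCloser accesses shown here.

    // Minimal sketch of peeling a wrapper like readTrackingBody off a body
    // before copying; field names are assumptions based on this diff, not the
    // package's actual definition.
    package main

    import (
        "io"
        "os"
        "strings"
    )

    type readTrackingBody struct {
        io.ReadCloser
        didRead bool
    }

    func unwrap(r io.Reader) io.Reader {
        if rt, ok := r.(*readTrackingBody); ok {
            rt.didRead = true
            return rt.ReadCloser
        }
        return r
    }

    func main() {
        body := &readTrackingBody{ReadCloser: io.NopCloser(strings.NewReader("hello"))}
        // io.Copy sees the inner reader, so an *os.File body could use sendfile.
        io.Copy(os.Stdout, unwrap(body))
    }
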
@@ -425,11 +428,11 @@ type transferReader struct {
ProtoMajor int
ProtoMinor int
// Output
- Body io.ReadCloser
- ContentLength int64
- TransferEncoding []string
- Close bool
- Trailer Header
+ Body io.ReadCloser
+ ContentLength int64
+ Chunked bool
+ Close bool
+ Trailer Header
}
func (t *transferReader) protoAtLeast(m, n int) bool {
@@ -501,13 +504,12 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
t.ProtoMajor, t.ProtoMinor = 1, 1
}
- // Transfer encoding, content length
- err = t.fixTransferEncoding()
- if err != nil {
+ // Transfer-Encoding: chunked, and overriding Content-Length.
+ if err := t.parseTransferEncoding(); err != nil {
return err
}
- realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
+ realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
if err != nil {
return err
}
@@ -522,7 +524,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
}
// Trailer
- t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
+ t.Trailer, err = fixTrailer(t.Header, t.Chunked)
if err != nil {
return err
}
@@ -532,9 +534,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
// See RFC 7230, section 3.3.
switch msg.(type) {
case *Response:
- if realLength == -1 &&
- !chunked(t.TransferEncoding) &&
- bodyAllowedForStatus(t.StatusCode) {
+ if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
// Unbounded body.
t.Close = true
}
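
Note: the "Unbounded body" case means an HTTP/1.1 response that is neither chunked nor carries a Content-Length is read until the connection closes, and the connection is then marked unusable for further requests. A small illustration of the externally visible result, using the public ReadResponse API rather than this internal code (an assumption about observable behavior, not part of the diff):

    package main

    import (
        "bufio"
        "fmt"
        "io"
        "net/http"
        "strings"
    )

    func main() {
        // No Content-Length and no Transfer-Encoding: the body runs to EOF.
        raw := "HTTP/1.1 200 OK\r\n\r\nhello"
        resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
        if err != nil {
            panic(err)
        }
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(string(body))       // hello
        fmt.Println(resp.ContentLength) // -1: length unknown
        fmt.Println(resp.Close)         // true: connection cannot be reused
    }
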
@@ -543,7 +543,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
// Prepare body reader. ContentLength < 0 means chunked encoding
// or close connection when finished, since multipart is not supported yet
switch {
- case chunked(t.TransferEncoding):
+ case t.Chunked:
if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
t.Body = NoBody
} else {
@@ -569,13 +569,17 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
case *Request:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
rr.Close = t.Close
rr.Trailer = t.Trailer
case *Response:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
rr.Close = t.Close
rr.Trailer = t.Trailer
}
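
Note: after this change the parsed message no longer carries the raw Transfer-Encoding slice; it reports []string{"chunked"} exactly when the body was chunked. A quick way to see the result through the public ReadRequest helper (a sketch, assuming a toolchain that includes this change):

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        raw := "POST / HTTP/1.1\r\nHost: example.com\r\n" +
            "Transfer-Encoding: chunked\r\n\r\n" +
            "3\r\nfoo\r\n0\r\n\r\n"
        req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(raw)))
        if err != nil {
            panic(err)
        }
        fmt.Println(req.TransferEncoding) // [chunked]
        fmt.Println(req.ContentLength)    // -1: length is determined by the chunks
    }
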
@@ -605,8 +609,8 @@ func isUnsupportedTEError(err error) bool {
return ok
}
-// fixTransferEncoding sanitizes t.TransferEncoding, if needed.
-func (t *transferReader) fixTransferEncoding() error {
+// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
+func (t *transferReader) parseTransferEncoding() error {
raw, present := t.Header["Transfer-Encoding"]
if !present {
return nil
@@ -618,56 +622,38 @@ func (t *transferReader) fixTransferEncoding() error {
return nil
}
- encodings := strings.Split(raw[0], ",")
- te := make([]string, 0, len(encodings))
- // TODO: Even though we only support "identity" and "chunked"
- // encodings, the loop below is designed with foresight. One
- // invariant that must be maintained is that, if present,
- // chunked encoding must always come first.
- for _, encoding := range encodings {
- encoding = strings.ToLower(strings.TrimSpace(encoding))
- // "identity" encoding is not recorded
- if encoding == "identity" {
- break
- }
- if encoding != "chunked" {
- return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", encoding)}
- }
- te = te[0 : len(te)+1]
- te[len(te)-1] = encoding
- }
- if len(te) > 1 {
- return &badStringError{"too many transfer encodings", strings.Join(te, ",")}
- }
- if len(te) > 0 {
- // RFC 7230 3.3.2 says "A sender MUST NOT send a
- // Content-Length header field in any message that
- // contains a Transfer-Encoding header field."
- //
- // but also:
- // "If a message is received with both a
- // Transfer-Encoding and a Content-Length header
- // field, the Transfer-Encoding overrides the
- // Content-Length. Such a message might indicate an
- // attempt to perform request smuggling (Section 9.5)
- // or response splitting (Section 9.4) and ought to be
- // handled as an error. A sender MUST remove the
- // received Content-Length field prior to forwarding
- // such a message downstream."
- //
- // Reportedly, these appear in the wild.
- delete(t.Header, "Content-Length")
- t.TransferEncoding = te
- return nil
+ // Like nginx, we only support a single Transfer-Encoding header field, and
+ // only if set to "chunked". This is one of the most security sensitive
+ // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
+ // strict and simple.
+ if len(raw) != 1 {
+ return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
}
+ if strings.ToLower(textproto.TrimString(raw[0])) != "chunked" {
+ return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
+ }
+
+ // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
+ // in any message that contains a Transfer-Encoding header field."
+ //
+ // but also: "If a message is received with both a Transfer-Encoding and a
+ // Content-Length header field, the Transfer-Encoding overrides the
+ // Content-Length. Such a message might indicate an attempt to perform
+ // request smuggling (Section 9.5) or response splitting (Section 9.4) and
+ // ought to be handled as an error. A sender MUST remove the received
+ // Content-Length field prior to forwarding such a message downstream."
+ //
+ // Reportedly, these appear in the wild.
+ delete(t.Header, "Content-Length")
+ t.Chunked = true
return nil
}
// Determine the expected body length, using RFC 7230 Section 3.3. This
// function is not a method, because ultimately it should be shared by
// ReadResponse and ReadRequest.
-func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
+func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
isRequest := !isResponse
contentLens := header["Content-Length"]
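
Note: the rewritten parseTransferEncoding above accepts exactly one Transfer-Encoding field line whose value is "chunked" (case-insensitively, after trimming); anything else, including "identity" and coding lists such as "gzip, chunked", is now rejected as unsupported. A hedged illustration of the resulting behavior through the public parser (exact error text may differ):

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
        "strings"
    )

    func parse(headers string) {
        raw := "POST / HTTP/1.1\r\nHost: x\r\n" + headers + "\r\n0\r\n\r\n"
        _, err := http.ReadRequest(bufio.NewReader(strings.NewReader(raw)))
        fmt.Println(err)
    }

    func main() {
        parse("Transfer-Encoding: chunked\r\n")                               // expected: <nil>
        parse("Transfer-Encoding: gzip, chunked\r\n")                         // expected: unsupported transfer encoding
        parse("Transfer-Encoding: chunked\r\nTransfer-Encoding: chunked\r\n") // expected: too many transfer encodings
        parse("Transfer-Encoding: identity\r\n")                              // expected: unsupported transfer encoding
    }
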
@@ -677,9 +663,9 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header,
// Content-Length headers if they differ in value.
// If there are dups of the value, remove the dups.
// See Issue 16490.
- first := strings.TrimSpace(contentLens[0])
+ first := textproto.TrimString(contentLens[0])
for _, ct := range contentLens[1:] {
- if first != strings.TrimSpace(ct) {
+ if first != textproto.TrimString(ct) {
return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
}
}
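
Note: the duplicate-handling described in the comment above (Issue 16490) predates this change; only the trimming helper moves from strings.TrimSpace to textproto.TrimString here. For context, duplicate Content-Length fields that agree are collapsed to one, while conflicting values are rejected, as this sketch via the public parser shows:

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        // Identical duplicates are collapsed and accepted.
        ok := "POST / HTTP/1.1\r\nHost: x\r\nContent-Length: 3\r\nContent-Length: 3\r\n\r\nfoo"
        req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(ok)))
        if err != nil {
            panic(err)
        }
        fmt.Println(req.ContentLength) // 3

        // Conflicting values are an error.
        bad := "POST / HTTP/1.1\r\nHost: x\r\nContent-Length: 3\r\nContent-Length: 4\r\n\r\nfoo"
        _, err = http.ReadRequest(bufio.NewReader(strings.NewReader(bad)))
        fmt.Println(err) // expected: message cannot contain multiple Content-Length headers
    }
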
@@ -711,14 +697,14 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header,
}
// Logic based on Transfer-Encoding
- if chunked(te) {
+ if chunked {
return -1, nil
}
// Logic based on Content-Length
var cl string
if len(contentLens) == 1 {
- cl = strings.TrimSpace(contentLens[0])
+ cl = textproto.TrimString(contentLens[0])
}
if cl != "" {
n, err := parseContentLength(cl)
@@ -766,12 +752,12 @@ func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
}
// Parse the trailer header
-func fixTrailer(header Header, te []string) (Header, error) {
+func fixTrailer(header Header, chunked bool) (Header, error) {
vv, ok := header["Trailer"]
if !ok {
return nil, nil
}
- if !chunked(te) {
+ if !chunked {
// Trailer and no chunking:
// this is an invalid use case for trailer header.
// Nevertheless, no error will be returned and we
@@ -791,7 +777,7 @@ func fixTrailer(header Header, te []string) (Header, error) {
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
if err == nil {
- err = &badStringError{"bad trailer key", key}
+ err = badStringError("bad trailer key", key)
return
}
}
@@ -1049,15 +1035,15 @@ func (bl bodyLocked) Read(p []byte) (n int, err error) {
// parseContentLength trims whitespace from cl and returns -1 if no value
// is set, or the value if it's >= 0.
func parseContentLength(cl string) (int64, error) {
- cl = strings.TrimSpace(cl)
+ cl = textproto.TrimString(cl)
if cl == "" {
return -1, nil
}
- n, err := strconv.ParseInt(cl, 10, 64)
- if err != nil || n < 0 {
- return 0, &badStringError{"bad Content-Length", cl}
+ n, err := strconv.ParseUint(cl, 10, 63)
+ if err != nil {
+ return 0, badStringError("bad Content-Length", cl)
}
- return n, nil
+ return int64(n), nil
}
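
Note: switching from ParseInt to ParseUint with a bit size of 63 keeps the parsed value representable as a non-negative int64 and also rejects any explicit sign, since ParseUint permits no sign prefix. A standalone sketch mirroring the new logic for illustration (the helper name and the use of strings.TrimSpace instead of textproto.TrimString are simplifications):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseCL mirrors the reworked parseContentLength for illustration only.
    func parseCL(cl string) (int64, error) {
        cl = strings.TrimSpace(cl)
        if cl == "" {
            return -1, nil // no value set
        }
        // Bit size 63 guarantees the result fits in an int64 without going negative.
        n, err := strconv.ParseUint(cl, 10, 63)
        if err != nil {
            return 0, fmt.Errorf("bad Content-Length %q", cl)
        }
        return int64(n), nil
    }

    func main() {
        for _, v := range []string{"42", "", "-1", "+5", "9223372036854775808"} {
            n, err := parseCL(v)
            fmt.Printf("%q -> %d, %v\n", v, n, err)
        }
        // "42" parses; "" means unset (-1); "-1", "+5", and the out-of-range
        // value are all rejected.
    }
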
@@ -1092,6 +1078,9 @@ func isKnownInMemoryReader(r io.Reader) bool {
if reflect.TypeOf(r) == nopCloserType {
return isKnownInMemoryReader(reflect.ValueOf(r).Field(0).Interface().(io.Reader))
}
+ if r, ok := r.(*readTrackingBody); ok {
+ return isKnownInMemoryReader(r.ReadCloser)
+ }
return false
}