path: root/Utilities/cmcurl/lib/transfer.c
Diffstat (limited to 'Utilities/cmcurl/lib/transfer.c')
-rw-r--r--  Utilities/cmcurl/lib/transfer.c  112
1 file changed, 84 insertions, 28 deletions
diff --git a/Utilities/cmcurl/lib/transfer.c b/Utilities/cmcurl/lib/transfer.c
index 315da876c4..441da73429 100644
--- a/Utilities/cmcurl/lib/transfer.c
+++ b/Utilities/cmcurl/lib/transfer.c
@@ -18,6 +18,8 @@
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
+ * SPDX-License-Identifier: curl
+ *
***************************************************************************/
#include "curl_setup.h"
@@ -202,6 +204,7 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
}
#endif
+#ifndef CURL_DISABLE_HTTP
/* if we are transmitting trailing data, we don't need to write
a chunk size so we skip this */
if(data->req.upload_chunky &&
@@ -211,7 +214,6 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
}
-#ifndef CURL_DISABLE_HTTP
if(data->state.trailers_state == TRAILERS_SENDING) {
/* if we're here then that means that we already sent the last empty chunk
but we didn't send a final CR LF, so we sent 0 CR LF. We then start
@@ -267,6 +269,7 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
return CURLE_READ_ERROR;
}
+#ifndef CURL_DISABLE_HTTP
if(!data->req.forbidchunk && data->req.upload_chunky) {
/* if chunked Transfer-Encoding
* build chunk:
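For reference, chunked Transfer-Encoding frames each chunk as "<hex size> CRLF <data> CRLF", which is the layout the code above reserves space for. A minimal standalone sketch of that framing (hypothetical buffer handling, not libcurl's own helpers):

#include <stdio.h>
#include <string.h>

int main(void)
{
  const char *payload = "hello";
  char chunk[64];
  /* hex length, CRLF, payload, CRLF - the on-the-wire chunk layout */
  int n = snprintf(chunk, sizeof(chunk), "%zx\r\n%s\r\n",
                   strlen(payload), payload);
  fwrite(chunk, 1, (size_t)n, stdout); /* emits: 5\r\nhello\r\n */
  return 0;
}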
@@ -317,15 +320,12 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
/* always append ASCII CRLF to the data unless
we have a valid trailer callback */
-#ifndef CURL_DISABLE_HTTP
if((nread-hexlen) == 0 &&
data->set.trailer_callback != NULL &&
data->state.trailers_state == TRAILERS_NONE) {
data->state.trailers_state = TRAILERS_INITIALIZED;
}
- else
-#endif
- {
+ else {
memcpy(data->req.upload_fromhere + nread,
endofline_network,
strlen(endofline_network));
@@ -333,7 +333,6 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
}
}
-#ifndef CURL_DISABLE_HTTP
if(data->state.trailers_state == TRAILERS_SENDING &&
!trailers_left(data)) {
Curl_dyn_free(&data->state.trailers_buf);
@@ -345,7 +344,6 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
infof(data, "Signaling end of chunked upload after trailers.");
}
else
-#endif
if((nread - hexlen) == 0 &&
data->state.trailers_state != TRAILERS_INITIALIZED) {
/* mark this as done once this chunk is transferred */
@@ -357,6 +355,7 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
if(added_crlf)
nread += strlen(endofline_network); /* for the added end of line */
}
+#endif
*nreadp = nread;
@@ -540,6 +539,13 @@ static CURLcode readwrite_data(struct Curl_easy *data,
bool is_http2 = ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
(conn->httpversion == 20));
#endif
+ bool is_http3 =
+#ifdef ENABLE_QUIC
+ ((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
+ (conn->httpversion == 30));
+#else
+ FALSE;
+#endif
if(
#ifdef USE_NGHTTP2
@@ -550,6 +556,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
for a particular stream. */
!is_http2 &&
#endif
+ !is_http3 && /* Same reason mentioned above. */
k->size != -1 && !k->header) {
/* make sure we don't read too much */
curl_off_t totalleft = k->size - k->bytecount;
@@ -597,6 +604,9 @@ static CURLcode readwrite_data(struct Curl_easy *data,
DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
else
#endif
+ if(is_http3 && !nread)
+ DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
+ else
DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
k->keepon &= ~KEEP_RECV;
break;
@@ -754,7 +764,13 @@ static CURLcode readwrite_data(struct Curl_easy *data,
if(nread < 0) /* this should be unusual */
nread = 0;
- k->keepon &= ~KEEP_RECV; /* we're done reading */
+ /* HTTP/3 over QUIC should keep reading until QUIC connection
+ is closed. In contrast to HTTP/2 which can stop reading
+ from TCP connection, HTTP/3 over QUIC needs ACK from server
+ to ensure stream closure. It should keep reading. */
+ if(!is_http3) {
+ k->keepon &= ~KEEP_RECV; /* we're done reading */
+ }
}
k->bytecount += nread;
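A simplified sketch of the gating introduced above, with hypothetical flag and parameter names: only non-HTTP/3 transfers clear the receive bit once the body is complete, since a QUIC stream is not finished until the server's ACKs have been processed.

#define KEEP_RECV (1<<0)  /* assumed bit value, for illustration only */

static void body_complete(int *keepon, int is_http3)
{
  if(!is_http3)
    *keepon &= ~KEEP_RECV; /* TCP-based HTTP: nothing more to read */
  /* HTTP/3: leave KEEP_RECV set so the QUIC layer keeps getting polled */
}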
@@ -900,6 +916,9 @@ static void win_update_buffer_size(curl_socket_t sockfd)
#define win_update_buffer_size(x)
#endif
+#define curl_upload_refill_watermark(data) \
+ ((ssize_t)((data)->set.upload_buffer_size >> 5))
+
/*
* Send data to upload to the server, when the socket is writable.
*/
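The new curl_upload_refill_watermark() macro evaluates to 1/32 of the configured upload buffer. A worked example, assuming libcurl's default 64 KiB upload buffer (CURLOPT_UPLOAD_BUFFERSIZE):

#include <stdio.h>

int main(void)
{
  long upload_buffer_size = 65536L;          /* assumed default, 64 KiB */
  long watermark = upload_buffer_size >> 5;  /* 65536 / 32 = 2048 */
  printf("append/refill once fewer than %ld bytes remain queued\n",
         watermark);
  return 0;
}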
@@ -921,13 +940,25 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
do {
curl_off_t nbody;
+ ssize_t offset = 0;
+
+ if(0 != k->upload_present &&
+ k->upload_present < curl_upload_refill_watermark(data) &&
+ !k->upload_chunky &&/*(variable sized chunked header; append not safe)*/
+ !k->upload_done && /*!(k->upload_done once k->upload_present sent)*/
+ !(k->writebytecount + k->upload_present - k->pendingheader ==
+ data->state.infilesize)) {
+ offset = k->upload_present;
+ }
/* only read more data if there's no upload data already
- present in the upload buffer */
- if(0 == k->upload_present) {
+ present in the upload buffer, or if appending to upload buffer */
+ if(0 == k->upload_present || offset) {
result = Curl_get_upload_buffer(data);
if(result)
return result;
+ if(offset && k->upload_fromhere != data->state.ulbuf)
+ memmove(data->state.ulbuf, k->upload_fromhere, offset);
/* init the "upload from here" pointer */
k->upload_fromhere = data->state.ulbuf;
@@ -960,12 +991,14 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
sending_http_headers = FALSE;
}
- result = Curl_fillreadbuffer(data, data->set.upload_buffer_size,
+ k->upload_fromhere += offset;
+ result = Curl_fillreadbuffer(data, data->set.upload_buffer_size-offset,
&fillcount);
+ k->upload_fromhere -= offset;
if(result)
return result;
- nread = fillcount;
+ nread = offset + fillcount;
}
else
nread = 0; /* we're done uploading/reading */
@@ -1007,7 +1040,9 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
* That means the hex values for ASCII CR (0x0d) & LF (0x0a)
* must be used instead of the escape sequences \r & \n.
*/
- for(i = 0, si = 0; i < nread; i++, si++) {
+ if(offset)
+ memcpy(data->state.scratch, k->upload_fromhere, offset);
+ for(i = offset, si = offset; i < nread; i++, si++) {
if(k->upload_fromhere[i] == 0x0a) {
data->state.scratch[si++] = 0x0d;
data->state.scratch[si] = 0x0a;
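The scratch-buffer loop above expands bare LF bytes to CRLF when converting outgoing data to network line endings. A self-contained sketch of that expansion (hypothetical helper, not libcurl code):

#include <stddef.h>

/* out must be able to hold up to 2 * inlen bytes */
static size_t lf_to_crlf(const char *in, size_t inlen, char *out)
{
  size_t si = 0;
  for(size_t i = 0; i < inlen; i++) {
    if(in[i] == 0x0a) {   /* bare LF: emit CR before it */
      out[si++] = 0x0d;
      out[si++] = 0x0a;
    }
    else
      out[si++] = in[i];
  }
  return si;
}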
@@ -1037,12 +1072,12 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
#ifndef CURL_DISABLE_SMTP
if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
- result = Curl_smtp_escape_eob(data, nread);
+ result = Curl_smtp_escape_eob(data, nread, offset);
if(result)
return result;
}
#endif /* CURL_DISABLE_SMTP */
- } /* if 0 == k->upload_present */
+ } /* if 0 == k->upload_present or appended to upload buffer */
else {
/* We have a partial buffer left from a previous "round". Use
that instead of reading more data */
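The offset handling added in this hunk implements a refill-and-append strategy: when only a small unsent tail (below the watermark) is left in the upload buffer, it is moved to the front and the read callback fills the space behind it, instead of issuing a tiny send. A simplified sketch of the idea, using assumed field names rather than libcurl's real SingleRequest state:

#include <string.h>

struct upload_state {
  char *buf;        /* upload buffer */
  char *fromhere;   /* first unsent byte inside buf */
  size_t present;   /* number of unsent bytes */
  size_t size;      /* total buffer size */
};

/* Returns how many bytes the next read may deliver into the buffer. */
static size_t compact_for_refill(struct upload_state *u, size_t watermark)
{
  if(u->present && u->present < watermark) {
    memmove(u->buf, u->fromhere, u->present); /* slide tail to the front */
    u->fromhere = u->buf;
    return u->size - u->present;              /* room left for new data */
  }
  return u->present ? 0 : u->size; /* send what is queued, or refill fully */
}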
@@ -1153,10 +1188,12 @@ CURLcode Curl_readwrite(struct connectdata *conn,
else
fd_write = CURL_SOCKET_BAD;
+#if defined(USE_HTTP2) || defined(USE_HTTP3)
if(data->state.drain) {
select_res |= CURL_CSELECT_IN;
DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data"));
}
+#endif
if(!select_res) /* Call for select()/poll() only, if read/write/error
status is not known. */
@@ -1222,6 +1259,14 @@ CURLcode Curl_readwrite(struct connectdata *conn,
infof(data, "Done waiting for 100-continue");
}
}
+
+#ifdef ENABLE_QUIC
+ if(conn->transport == TRNSPRT_QUIC) {
+ result = Curl_quic_idle(data);
+ if(result)
+ return result;
+ }
+#endif
}
if(Curl_pgrsUpdate(data))
@@ -1394,7 +1439,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
if(result)
return result;
- data->state.wildcardmatch = data->set.wildcard_enabled;
+ data->state.requests = 0;
data->state.followlocation = 0; /* reset the location-follow counter */
data->state.this_is_a_follow = FALSE; /* reset this */
data->state.errorbuf = FALSE; /* no error has occurred */
@@ -1416,10 +1461,11 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
else
data->state.infilesize = 0;
+#ifndef CURL_DISABLE_COOKIES
/* If there is a list of cookie files to read, do it now! */
if(data->state.cookielist)
Curl_cookie_loadfiles(data);
-
+#endif
/* If there is a list of host pairs to deal with */
if(data->state.resolve)
result = Curl_loadhostpairs(data);
@@ -1449,6 +1495,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
data->state.authproxy.picked &= data->state.authproxy.want;
#ifndef CURL_DISABLE_FTP
+ data->state.wildcardmatch = data->set.wildcard_enabled;
if(data->state.wildcardmatch) {
struct WildcardData *wc = &data->wildcard;
if(wc->state < CURLWC_INIT) {
@@ -1562,7 +1609,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
data->state.referer_alloc = FALSE;
}
- /* Make a copy of the URL without crenditals and fragment */
+ /* Make a copy of the URL without credentials and fragment */
u = curl_url();
if(!u)
return CURLE_OUT_OF_MEMORY;
@@ -1590,7 +1637,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
if((type != FOLLOW_RETRY) &&
(data->req.httpcode != 401) && (data->req.httpcode != 407) &&
- Curl_is_absolute_url(newurl, NULL, 0))
+ Curl_is_absolute_url(newurl, NULL, 0, FALSE))
/* If this is not redirect due to a 401 or 407 response and an absolute
URL: don't allow a custom port number */
disallowport = TRUE;
@@ -1599,10 +1646,14 @@ CURLcode Curl_follow(struct Curl_easy *data,
uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
(type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) |
- CURLU_ALLOW_SPACE);
+ CURLU_ALLOW_SPACE |
+ (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
if(uc) {
- if(type != FOLLOW_FAKE)
+ if(type != FOLLOW_FAKE) {
+ failf(data, "The redirect target URL could not be parsed: %s",
+ curl_url_strerror(uc));
return Curl_uc_to_curlcode(uc);
+ }
/* the URL could not be parsed for some reason, but since this is FAKE
mode, just duplicate the field as-is */
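The redirect target is now parsed with CURLU_ALLOW_SPACE and, when --path-as-is is in effect, CURLU_PATH_AS_IS. A small usage sketch of the same public CURLU API and flag combination (assumes a libcurl new enough to provide curl_url_strerror()):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLU *u = curl_url();
  CURLUcode uc;
  if(!u)
    return 1;
  uc = curl_url_set(u, CURLUPART_URL, "https://example.com/a/../b c",
                    CURLU_URLENCODE | CURLU_ALLOW_SPACE | CURLU_PATH_AS_IS);
  if(uc)
    fprintf(stderr, "could not parse URL: %s\n", curl_url_strerror(uc));
  curl_url_cleanup(u);
  return (int)uc;
}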
@@ -1649,7 +1700,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
return Curl_uc_to_curlcode(uc);
}
- p = Curl_builtin_scheme(scheme);
+ p = Curl_builtin_scheme(scheme, CURL_ZERO_TERMINATED);
if(p && (p->protocol != data->info.conn_protocol)) {
infof(data, "Clear auth, redirects scheme from %s to %s",
data->info.conn_scheme, scheme);
@@ -1813,10 +1864,12 @@ CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
return CURLE_OK;
if((data->req.bytecount + data->req.headerbytecount == 0) &&
- conn->bits.reuse &&
- (!data->set.opt_no_body
- || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
- (data->set.rtspreq != RTSPREQ_RECEIVE))
+ conn->bits.reuse &&
+ (!data->set.opt_no_body || (conn->handler->protocol & PROTO_FAMILY_HTTP))
+#ifndef CURL_DISABLE_RTSP
+ && (data->set.rtspreq != RTSPREQ_RECEIVE)
+#endif
+ )
/* We got no data, we attempted to re-use a connection. For HTTP this
can be a retry so we try again regardless if we expected a body.
For other protocols we only try again only if we expected a body.
@@ -1888,11 +1941,14 @@ Curl_setup_transfer(
struct SingleRequest *k = &data->req;
struct connectdata *conn = data->conn;
struct HTTP *http = data->req.p.http;
- bool httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
- (http->sending == HTTPSEND_REQUEST));
+ bool httpsending;
+
DEBUGASSERT(conn != NULL);
DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
+ httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
+ (http->sending == HTTPSEND_REQUEST));
+
if(conn->bits.multiplex || conn->httpversion == 20 || httpsending) {
/* when multiplexing, the read/write sockets need to be the same! */
conn->sockfd = sockindex == -1 ?