diff options
author | Nick Vatamaniuc <vatamane@apache.org> | 2018-03-23 18:03:05 -0400 |
---|---|---|
committer | Jan Lehnardt <jan@apache.org> | 2018-03-26 11:03:51 +0200 |
commit | e7c48b3d2db46e05f651567d611fb51f8ec7db1f (patch) | |
tree | fae9b9647da1221a0b21b04ea8d1a5bb28d64a9a | |
parent | 3d702d8e85068d65272dc10b4fa7015bf66fedf5 (diff) | |
download | couchdb-e7c48b3d2db46e05f651567d611fb51f8ec7db1f.tar.gz |
Improve 413 response handling
Previously, when the server decided too much data was sent in the client's
request, it would immediately send a 413 response and close the socket. In the
meantime there could be unread data on the socket since the client keeps
streaming data. When this happens the connection is reset instead of going
through regular close sequence. The client, specifically the replicator client,
detected the reset before it had a chance to process the 413 response. This
led to a retry, since it was interpreted as a generic network error, instead of
a proper 413 HTTP error.
The improvement is to flush the receive socket before and after sending a 413
response, then close the connection. This reduces the chance of the socket
being closed with unread data, avoids a TCP reset, and gives the client a
better chance of parsing the 413 response. This is mostly geared to work with
the replicator client but should help other clients as well.
Also the connection on both the server and the client sides is closed after a
413 event. This avoids a few race conditions where it is not clear how much data
is on the socket after the 413 is processed. On the server side, the `close`
response header is set and the socket is closed. On the client side, a flag is set
such that right before the worker is released back to the pool it is stopped, which
closes the socket.
-rw-r--r-- | src/couch/src/couch_httpd.erl | 13 | ||||
-rw-r--r-- | src/couch_replicator/src/couch_replicator_httpc.erl | 20 |
2 files changed, 26 insertions, 7 deletions
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl index 1694ac87f..050282a0c 100644 --- a/src/couch/src/couch_httpd.erl +++ b/src/couch/src/couch_httpd.erl @@ -1170,6 +1170,19 @@ before_response(Req0, Code0, Headers0, Args0) -> respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) -> MochiReq:start_response({Code, Headers}); +respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) -> + % Special handling for the 413 response. Make sure the socket is closed as + % we don't know how much data was read before the error was thrown. Also + % drain all the data in the receive buffer to avoid connction being reset + % before the 413 response is parsed by the client. This is still racy, it + % just increases the chances of 413 being detected correctly by the client + % (rather than getting a brutal TCP reset). + erlang:put(mochiweb_request_force_close, true), + Socket = MochiReq:get(socket), + mochiweb_socket:recv(Socket, 0, 0), + Result = MochiReq:Type({413, Headers, Args}), + mochiweb_socket:recv(Socket, 0, 0), + Result; respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) -> MochiReq:Type({Code, Headers, Args}). diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl index 6e787514b..2f865c6d2 100644 --- a/src/couch_replicator/src/couch_replicator_httpc.erl +++ b/src/couch_replicator/src/couch_replicator_httpc.erl @@ -28,7 +28,7 @@ -define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})). -define(MAX_WAIT, 5 * 60 * 1000). -define(STREAM_STATUS, ibrowse_stream_status). - +-define(STOP_HTTP_WORKER, stop_http_worker). 
% This limit is for the number of messages we're willing to discard % from an HTTP stream in clean_mailbox/1 before killing the worker @@ -78,10 +78,14 @@ send_req(HttpDb, Params1, Callback) -> throw:{retry, NewHttpDb0, NewParams0} -> {retry, NewHttpDb0, NewParams0} after - ok = couch_replicator_httpc_pool:release_worker( - HttpDb#httpdb.httpc_pool, - Worker - ), + Pool = HttpDb#httpdb.httpc_pool, + case get(?STOP_HTTP_WORKER) of + stop -> + ok = stop_and_release_worker(Pool, Worker), + erase(?STOP_HTTP_WORKER); + undefined -> + ok = couch_replicator_httpc_pool:release_worker(Pool, Worker) + end, clean_mailbox(Response) end, % This is necessary to keep this tail-recursive. Calling @@ -138,7 +142,7 @@ stop_and_release_worker(Pool, Worker) -> ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Worker). process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) -> - stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker), + put(?STOP_HTTP_WORKER, stop), maybe_retry(sel_conn_closed, Worker, HttpDb, Params); @@ -147,7 +151,7 @@ process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) -> %% and closes the socket, ibrowse will detect that error when it sends %% next request. 
process_response({error, connection_closing}, Worker, HttpDb, Params, _Cb) -> - stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker), + put(?STOP_HTTP_WORKER, stop), maybe_retry({error, connection_closing}, Worker, HttpDb, Params); process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) -> @@ -167,6 +171,7 @@ process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) -> ?JSON_DECODE(Json) end, process_auth_response(HttpDb, Ok, Headers, Params), + if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end, Callback(Ok, Headers, EJson); R when R =:= 301 ; R =:= 302 ; R =:= 303 -> backoff_success(HttpDb, Params), @@ -194,6 +199,7 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) -> stream_data_self(HttpDb1, Params, Worker, ReqId, Callback) end, put(?STREAM_STATUS, {streaming, Worker}), + if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end, ibrowse:stream_next(ReqId), try Ret = Callback(Ok, Headers, StreamDataFun), |