summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: James Coglan <james@neighbourhood.ie> 2022-03-07 14:52:37 +0000
committer: Jan Lehnardt <jan@apache.org> 2022-03-08 14:00:28 +0100
commit: b9a322344aa6e86cf81af61300d697619f545311 (patch)
tree: b9f498d52cf6f1a241e25edb0f2e3954664c078d
parent: 96f9ab839a6206e8c6e6f81c6fc41cd48b601eeb (diff)
download: couchdb-b9a322344aa6e86cf81af61300d697619f545311.tar.gz
feat: make the timeout for receiving requests from attachment writers configurable
The code that forwards attachment data to cluster nodes via fabric has a hard-coded timeout of five minutes for nodes to request the data. Making this configurable lets us mitigate the impact of issue #3939 [1], which causes requests to block if one of the nodes already has the given attachment and doesn't end up requesting the data for it. [1]: https://github.com/apache/couchdb/issues/3939
-rw-r--r--  rel/overlay/etc/default.ini          | 4
-rw-r--r--  src/couch/src/couch_httpd_multipart.erl | 7
2 files changed, 9 insertions, 2 deletions
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 3c15ae92c..6b64c6d74 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -80,6 +80,10 @@ view_index_dir = {{view_index_dir}}
; Allow edits on the _security object in the user db. By default, it's disabled.
;users_db_security_editable = false
+; Sets the maximum time that the coordinator node will wait for cluster members
+; to request attachment data before returning a response to the client.
+;attachment_writer_timeout = 300000
+
[purge]
; Allowed maximum number of documents in one purge request
;max_document_id_number = 100
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
index ecdf10562..95a2c9e3c 100644
--- a/src/couch/src/couch_httpd_multipart.erl
+++ b/src/couch/src/couch_httpd_multipart.erl
@@ -122,7 +122,7 @@ mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) ->
NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
mp_parse_atts(eof, NewAcc)
end
- after 300000 ->
+ after att_writer_timeout() ->
ok
end
end.
@@ -198,7 +198,7 @@ maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
{get_bytes, Ref, X} ->
C2 = update_writer(X, Counters),
maybe_send_data({Ref, NewChunks, NewOffset, C2, [X | NewWaiting]})
- after 300000 ->
+ after att_writer_timeout() ->
abort_parsing
end
end
@@ -243,6 +243,9 @@ num_mp_writers() ->
Count -> Count
end.
+att_writer_timeout() ->
+ config:get_integer("couchdb", "attachment_writer_timeout", 300000).
+
encode_multipart_stream(_Boundary, JsonBytes, [], WriteFun, _AttFun) ->
WriteFun(JsonBytes);
encode_multipart_stream(Boundary, JsonBytes, Atts, WriteFun, AttFun) ->