author     Tony Sun <tony.sun427@gmail.com>    2022-05-02 22:04:24 -0700
committer  GitHub <noreply@github.com>         2022-05-02 22:04:24 -0700
commit     160d02d8ce0dea75c9c3cec2725b1a4d939fe9dc (patch)
tree       ae970932993cb4e50fbbe5e26cd2fa425184d564
parent     0e38f390796d6e34f698853f5886a36607e2aeec (diff)
download   couchdb-160d02d8ce0dea75c9c3cec2725b1a4d939fe9dc.tar.gz
use undefined for timed out responses (#3979)
* fix badargs for timed out responses

Under heavy load, fabric_doc_update workers can time out via rexi, so no
responses are populated into the response dictionary. This leads to badarg
errors when dict:fetch is called for keys that do not exist. This fix
ensures that every document update receives a response; if any document is
missing a response, the entire update returns a 500.
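To illustrate the failure mode with a minimal, hypothetical sketch (plain
Erlang, not code from this patch): dict:fetch/2 raises badarg for a missing
key, which is exactly what happened when a timed-out worker never added its
response, whereas a lookup that falls back to a default keeps one result per
requested key:

    %% Hypothetical sketch of the bug and the fix; doc3's worker timed out,
    %% so it never produced a response.
    Keys = [doc1, doc2, doc3],
    Responses = [{doc1, {ok, rev1}}, {doc2, {ok, rev2}}],
    Dict = dict:from_list(Responses),
    %% dict:fetch(doc3, Dict) raises badarg here (the old behavior).
    %% Falling back to a default preserves the shape of the result list:
    [case dict:find(K, Dict) of
         {ok, V} -> V;
         error -> {error, internal_server_error}
     end
     || K <- Keys].
    %% => [{ok, rev1}, {ok, rev2}, {error, internal_server_error}]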
-rw-r--r--  src/chttpd/src/chttpd_db.erl          21
-rw-r--r--  src/couch/src/couch_util.erl          14
-rw-r--r--  src/fabric/src/fabric.erl               2
-rw-r--r--  src/fabric/src/fabric_doc_update.erl   22
4 files changed, 54 insertions, 5 deletions
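The new couch_util:reorder_results/3 (added below) threads a caller-supplied
default through the reordering, and fabric_doc_update:ensure_all_responses/3
downgrades the whole request when that default shows up in the results. A
rough usage sketch, with illustrative values that are not taken from the
patch:

    %% Rough usage sketch of reorder_results/3 as called in this patch.
    Keys    = [docA, docB],
    Resp    = [{docA, {ok, rev}}],               % docB's worker timed out
    Default = {error, internal_server_error},
    couch_util:reorder_results(Keys, Resp, Default).
    %% => [{ok, rev}, {error, internal_server_error}]
    %% ensure_all_responses/3 then sees the default in the list, returns
    %% {error, Results}, and chttpd_db maps that to an HTTP 500 response.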
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index cfae8ad5f..9c9c4ef87 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -633,6 +633,15 @@ db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>], user_ctx = Ct
Results
),
send_json(Req, 202, DocResults);
+ {error, Results} ->
+ % output the results
+ chttpd_stats:incr_writes(length(Results)),
+ DocResults = lists:zipwith(
+ fun update_doc_result_to_json/2,
+ Docs,
+ Results
+ ),
+ send_json(Req, 500, DocResults);
{aborted, Errors} ->
ErrorsJson =
lists:map(fun update_doc_result_to_json/1, Errors),
@@ -647,7 +656,11 @@ db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>], user_ctx = Ct
{accepted, Errors} ->
chttpd_stats:incr_writes(length(Docs)),
ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 202, ErrorsJson)
+ send_json(Req, 202, ErrorsJson);
+ {error, Errors} ->
+ chttpd_stats:incr_writes(length(Docs)),
+ ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 500, ErrorsJson)
end;
_ ->
throw({bad_request, <<"`new_edits` parameter must be a boolean.">>})
@@ -1472,6 +1485,12 @@ receive_request_data(Req, LenLeft) when LenLeft > 0 ->
receive_request_data(_Req, _) ->
throw(<<"expected more data">>).
+update_doc_result_to_json({error, _} = Error) ->
+ {_Code, Err, Msg} = chttpd:error_info(Error),
+ {[
+ {error, Err},
+ {reason, Msg}
+ ]};
update_doc_result_to_json({{Id, Rev}, Error}) ->
{_Code, Err, Msg} = chttpd:error_info(Error),
{[
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index f942b708c..e03a50e1d 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -24,7 +24,7 @@
-export([json_encode/1, json_decode/1, json_decode/2]).
-export([verify/2, simple_call/2, shutdown_sync/1]).
-export([get_value/2, get_value/3]).
--export([reorder_results/2]).
+-export([reorder_results/2, reorder_results/3]).
-export([url_strip_password/1]).
-export([encode_doc_id/1]).
-export([normalize_ddoc_id/1]).
@@ -530,6 +530,18 @@ reorder_results(Keys, SortedResults) ->
KeyDict = dict:from_list(SortedResults),
[dict:fetch(Key, KeyDict) || Key <- Keys].
+reorder_results(Keys, SortedResults, Default) when length(Keys) < 100 ->
+ [couch_util:get_value(Key, SortedResults, Default) || Key <- Keys];
+reorder_results(Keys, SortedResults, Default) ->
+ KeyDict = dict:from_list(SortedResults),
+ DefaultFunc = fun({Key, Dict}) ->
+ case dict:is_key(Key, Dict) of
+ true -> dict:fetch(Key, Dict);
+ false -> Default
+ end
+ end,
+ [DefaultFunc({Key, KeyDict}) || Key <- Keys].
+
url_strip_password(Url) ->
re:replace(
Url,
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 5840cd63c..3e61920e0 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -827,7 +827,7 @@ setup() ->
ok = meck:expect(
couch_util,
reorder_results,
- fun(_, [{_, Res}]) ->
+ fun(_, [{_, Res}], _) ->
[Res]
end
),
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index 1360bda81..4cf1ccfc3 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -42,7 +42,7 @@ go(DbName, AllDocs0, Opts) ->
{ok, {Health, Results}} when
Health =:= ok; Health =:= accepted; Health =:= error
->
- {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]};
+ ensure_all_responses(Health, AllDocs, Results);
{timeout, Acc} ->
{_, _, W1, GroupedDocs1, DocReplDict} = Acc,
{DefunctWorkers, _} = lists:unzip(GroupedDocs1),
@@ -52,7 +52,7 @@ go(DbName, AllDocs0, Opts) ->
{ok, W1, []},
DocReplDict
),
- {Health, [R || R <- couch_util:reorder_results(AllDocs, Resp), R =/= noreply]};
+ ensure_all_responses(Health, AllDocs, Resp);
Else ->
Else
after
@@ -193,6 +193,24 @@ maybe_reply(Doc, Replies, {stop, W, Acc}) ->
continue
end.
+% this ensures that we got some response for all documents being updated
+ensure_all_responses(Health, AllDocs, Resp) ->
+ Results = [
+ R
+ || R <- couch_util:reorder_results(
+ AllDocs,
+ Resp,
+ {error, internal_server_error}
+ ),
+ R =/= noreply
+ ],
+ case lists:member({error, internal_server_error}, Results) of
+ true ->
+ {error, Results};
+ false ->
+ {Health, Results}
+ end.
+
% This is a corner case where
% 1) revision tree for the document are out of sync across nodes
% 2) update on one node extends the revision tree