author     Robert Newson <rnewson@apache.org>    2020-11-06 16:41:50 +0000
committer  GitHub <noreply@github.com>           2020-11-06 16:41:50 +0000
commit     3c8490efa8dc8196779ebdf9e1c0766f1d313593 (patch)
tree       8e8e7288a33583a290571f762ca3703004d62c9b
parent     077b09cb21dc8dbb6d52d8d81b2afc33bca835fe (diff)
parent     1218c536a40489a17229445c5f1c54ec91bac162 (diff)
download   couchdb-3c8490efa8dc8196779ebdf9e1c0766f1d313593.tar.gz
Merge pull request #3249 from apache/changes_filter_all_docs_oom_3.x
Retry filter_docs sequentially if the batch exceeds couchjs stack
-rw-r--r--  src/couch/src/couch_query_servers.erl               14
-rw-r--r--  src/couch/test/eunit/couch_query_servers_tests.erl   55
2 files changed, 68 insertions(+), 1 deletion(-)
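The fix wraps the existing batch filter prompt in a try/catch: if the external couchjs process exits with status 1 (surfaced as a thrown {os_process_error, {exit_status, 1}}, taken here to mean the batch was too large for couchjs), the same filter is re-applied one document at a time and the per-document results are concatenated with lists:flatmap/2, preserving order. Below is a minimal, self-contained sketch of that batch-then-sequential fallback pattern; the module name and the Filter fun are illustrative assumptions, not the real couch_query_servers API.

%% Illustrative sketch only: a generic "try the whole batch, then fall back
%% to one item at a time" helper. Filter is any fun that takes a list of
%% items and may throw {os_process_error, {exit_status, 1}} when the batch
%% it was handed is too large for the worker process.
-module(batch_fallback_sketch).
-export([filter_all/2]).

filter_all(Filter, Items) ->
    try
        Filter(Items)
    catch
        throw:{os_process_error, {exit_status, 1}} ->
            %% The whole batch blew up the worker; retry each item on its
            %% own and stitch the per-item results back together in order.
            lists:flatmap(fun(Item) -> Filter([Item]) end, Items)
    end.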
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index 447daea61..6649df364 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -495,9 +495,21 @@ filter_docs(Req, Db, DDoc, FName, Docs) ->
    end,
    Options = json_doc_options(),
    JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
+    try
+        {ok, filter_docs_int(DDoc, FName, JsonReq, JsonDocs)}
+    catch
+        throw:{os_process_error,{exit_status,1}} ->
+            %% batch used too much memory, retry sequentially.
+            Fun = fun(JsonDoc) ->
+                filter_docs_int(DDoc, FName, JsonReq, [JsonDoc])
+            end,
+            {ok, lists:flatmap(Fun, JsonDocs)}
+    end.
+
+filter_docs_int(DDoc, FName, JsonReq, JsonDocs) ->
    [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
        [JsonDocs, JsonReq]),
-    {ok, Passes}.
+    Passes.

ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
    proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
index f8df896c4..440fc8e1b 100644
--- a/src/couch/test/eunit/couch_query_servers_tests.erl
+++ b/src/couch/test/eunit/couch_query_servers_tests.erl
@@ -12,6 +12,7 @@
-module(couch_query_servers_tests).
+-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
@@ -23,6 +24,15 @@ teardown(_) ->
    meck:unload().


+setup_oom() ->
+    test_util:start_couch([ioq]).
+
+
+teardown_oom(Ctx) ->
+    meck:unload(),
+    test_util:stop_couch(Ctx).
+
+
sum_overflow_test_() ->
    {
        "Test overflow detection in the _sum reduce function",
@@ -39,6 +49,19 @@ sum_overflow_test_() ->
    }.


+filter_oom_test_() ->
+{
+    "Test recovery from oom in filters",
+    {
+        setup,
+        fun setup_oom/0,
+        fun teardown_oom/1,
+        [
+            fun should_split_large_batches/0
+        ]
+    }
+}.
+
should_return_error_on_overflow() ->
    meck:reset([config, couch_log]),
    meck:expect(
@@ -85,6 +108,38 @@ should_return_object_on_false() ->
    ?assertNot(meck:called(couch_log, error, '_')).


+should_split_large_batches() ->
+    Req = {json_req, {[]}},
+    Db = undefined,
+    DDoc = #doc{
+        id = <<"_design/foo">>,
+        revs = {0, [<<"bork bork bork">>]},
+        body = {[
+            {<<"filters">>, {[
+                {<<"bar">>, <<"function(req, doc) {return true;}">>}
+            ]}}
+        ]}
+    },
+    FName = <<"bar">>,
+    Docs = [
+        #doc{id = <<"a">>, body = {[]}},
+        #doc{id = <<"b">>, body = {[]}}
+    ],
+    meck:new(couch_os_process, [passthrough]),
+    meck:expect(couch_os_process, prompt, fun(Pid, Data) ->
+        case Data of
+            [<<"ddoc">>, _, [<<"filters">>, <<"bar">>], [[_, _], _]] ->
+                throw({os_process_error, {exit_status, 1}});
+            [<<"ddoc">>, _, [<<"filters">>, <<"bar">>], [[_], _]] ->
+                [true, [split_batch]];
+            _ ->
+                meck:passthrough([Pid, Data])
+        end
+    end),
+    {ok, Ret} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
+    ?assertEqual([split_batch, split_batch], Ret).
+
+
gen_sum_kvs() ->
    lists:map(fun(I) ->
        Props = lists:map(fun(_) ->