author    Nick Vatamaniuc <vatamane@apache.org>  2020-05-27 13:56:44 -0400
committer Nick Vatamaniuc <nickva@users.noreply.github.com>  2020-05-27 14:02:24 -0400
commit    56738359ac92e10187e908e1620fef13476862fe (patch)
tree      dbfeeef2cecce2998e1addb7a6ece45f849f0ecf
parent    19c040f65120898a3cfb54b643e2e49bdd192d02 (diff)
download  couchdb-56738359ac92e10187e908e1620fef13476862fe.tar.gz
Introduce _bulk_docs max_doc_count limit
Let users specify the maximum document count for _bulk_docs requests. If the document count exceeds the maximum, the request fails with a 413 HTTP error, which also signals the replicator to try to bisect the _bulk_docs array into smaller batches.
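The replicator-side bisection is not part of this patch; the intended reaction to the 413 is roughly the following sketch, where bulk_docs/2 is a hypothetical client call returning the HTTP status:

    %% Illustrative only: on a 413 from _bulk_docs, split the batch in
    %% half and retry each half, recursing until the batches fit.
    post_docs(Db, Docs) ->
        case bulk_docs(Db, Docs) of
            {error, 413} when length(Docs) > 1 ->
                {Left, Right} = lists:split(length(Docs) div 2, Docs),
                post_docs(Db, Left),
                post_docs(Db, Right);
            Other ->
                Other
        end.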
-rw-r--r--  rel/overlay/etc/default.ini                            4
-rw-r--r--  src/chttpd/src/chttpd.erl                              2
-rw-r--r--  src/chttpd/src/chttpd_db.erl                           5
-rw-r--r--  src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl    22
4 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 3630259a1..43e1c0ba3 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -55,6 +55,10 @@ changes_doc_ids_optimization_threshold = 100
; for size calculation instead of 7.
max_document_size = 8000000 ; bytes
;
+; Maximum number of documents in a _bulk_docs request. Anything larger
+; returns a 413 error for the whole request
+;max_bulk_docs_count = 10000
+;
; Maximum attachment size.
; max_attachment_size = infinity
;
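The option ships commented out and defaults to 10000 in the code. Since it is read through the config app on every request, it can also be adjusted at runtime from a remote shell on the node, the same way the test below does; a minimal sketch:

    %% Lower the limit without persisting it to the ini files; the new
    %% value applies to the next _bulk_docs request.
    ok = config:set("couchdb", "max_bulk_docs_count", "500", false),
    500 = config:get_integer("couchdb", "max_bulk_docs_count", 10000).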
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 699601c0e..e8639ed8d 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -956,6 +956,8 @@ error_info(request_entity_too_large) ->
{413, <<"too_large">>, <<"the request entity is too large">>};
error_info({request_entity_too_large, {attachment, AttName}}) ->
{413, <<"attachment_too_large">>, AttName};
+error_info({request_entity_too_large, {bulk_docs, Max}}) when is_integer(Max) ->
+ {413, <<"max_bulk_docs_count_exceeded">>, integer_to_binary(Max)};
error_info({request_entity_too_large, DocID}) ->
{413, <<"document_too_large">>, DocID};
error_info({error, security_migration_updates_disabled}) ->
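The thrown term carries the configured maximum, and the new clause renders it as the error reason, so clients can see which limit they hit. A sketch of the mapping (error_info/1 is internal to chttpd; values shown assume a limit of 10000):

    %% {request_entity_too_large, {bulk_docs, 10000}} becomes
    %%   {413, <<"max_bulk_docs_count_exceeded">>, <<"10000">>}
    %% which chttpd serializes as the JSON body
    %%   {"error": "max_bulk_docs_count_exceeded", "reason": "10000"}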
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 5cfbd1d5f..5af98fe3a 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -484,6 +484,11 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
DocsArray0 ->
DocsArray0
end,
+ MaxDocs = config:get_integer("couchdb", "max_bulk_docs_count", 10000),
+ case length(DocsArray) =< MaxDocs of
+ true -> ok;
+ false -> throw({request_entity_too_large, {bulk_docs, MaxDocs}})
+ end,
couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
Options = case chttpd:header_value(Req, "X-Couch-Full-Commit") of
"true" ->
diff --git a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
index 88e2797a3..2b04050a2 100644
--- a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
@@ -29,6 +29,7 @@ setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
ok = config:set("couchdb", "max_document_size", "50"),
+ ok = config:set("couchdb", "max_bulk_docs_count", "2"),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -39,7 +40,8 @@ setup() ->
teardown(Url) ->
delete_db(Url),
ok = config:delete("admins", ?USER, _Persist=false),
- ok = config:delete("couchdb", "max_document_size").
+ ok = config:delete("couchdb", "max_document_size"),
+ ok = config:delete("couchdb", "max_bulk_docs_count").
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
@@ -67,6 +69,7 @@ all_test_() ->
fun post_single_doc/1,
fun put_single_doc/1,
fun bulk_doc/1,
+ fun bulk_docs_too_many_docs/1,
fun put_post_doc_attach_inline/1,
fun put_multi_part_related/1,
fun post_multi_part_form/1
@@ -100,6 +103,23 @@ bulk_doc(Url) ->
Expect = {[{<<"error">>,<<"document_too_large">>},{<<"reason">>,<<>>}]},
?_assertEqual(Expect, ResultJson).
+
+bulk_docs_too_many_docs(Url) ->
+ Docs = "{\"docs\": ["
+ "{\"doc1\": \"{}\"}, "
+ "{\"doc2\": \"{}\"}, "
+ "{\"doc3\": \"{}\"}"
+ "]}",
+ {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH], Docs),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ ExpectJson = {[
+ {<<"error">>,<<"max_bulk_docs_count_exceeded">>},
+ {<<"reason">>,<<"2">>}
+ ]},
+ ?_assertEqual({413, ExpectJson}, {Code, ResultJson}).
+
+
put_post_doc_attach_inline(Url) ->
Body1 = "{\"body\":\"This is a body.\",",
Body2 = lists:concat(["{\"body\":\"This is a body it should fail",
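For reference, the scenario the new bulk_docs_too_many_docs/1 test pins down can be reproduced against a running node with the same test_request helper; Url and the auth header are placeholders, and max_bulk_docs_count is assumed to be 2 as in setup/0:

    %% Three docs against a limit of 2: expect a 413 whose body decodes
    %% to {"error":"max_bulk_docs_count_exceeded","reason":"2"}.
    Docs = "{\"docs\": [{\"doc1\": \"{}\"}, {\"doc2\": \"{}\"}, {\"doc3\": \"{}\"}]}",
    {ok, 413, _, Body} =
        test_request:post(Url ++ "/_bulk_docs/",
            [{"Content-Type", "application/json"}], Docs).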