author     Nick Vatamaniuc <vatamane@gmail.com>                2022-08-18 21:41:14 -0400
committer  Nick Vatamaniuc <nickva@users.noreply.github.com>   2022-08-19 13:18:59 -0400
commit     609e7cc79c9001eb10bda16453f7fda3c64d8e78 (patch)
tree       aa9f32396721b3dd94be0d1dfc9de58e3168a8b5
parent     8e6158972149eb01548a28cf8c4f82d739b621ee (diff)
download   couchdb-609e7cc79c9001eb10bda16453f7fda3c64d8e78.tar.gz
Update couch_replicator_small_max_request_size_target
Use the clustered version of the source and target endpoints, and switch to the common test setup and teardown functions (sketched below). Overall this saved quite a few lines of code.
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl  158
1 file changed, 43 insertions(+), 115 deletions(-)
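
The switch from foreachx to foreach is what removes the {remote, remote} pair plumbing: each ?TDEF_FE entry replaces a hand-rolled {timeout, ?TIMEOUT_EUNIT, ?_test(...)} wrapper from the deleted should_* helpers. As a rough sketch (an approximation, not the exact definition in couch_replicator_test.hrl), the macro bundles a test function's name, a timeout, and the test body into the shape EUnit's foreach fixture expects:

    %% Approximate shape of the macro; the real definition lives in the
    %% shared test header and may differ in detail.
    -define(TDEF_FE(Name, Timeout),
        fun(Arg) -> {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}} end
    ).

With foreach, each generated fun receives the {Ctx, {Source, Target}} value produced by test_setup/0, which is why the rewritten test functions in the diff below take a single argument.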
diff --git a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
index 3b020927d..4a905850d 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
@@ -2,137 +2,61 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
--import(couch_replicator_test_helper, [
- db_url/1,
- replicate/1,
- compare_dbs/3
-]).
+-include("couch_replicator_test.hrl").
-define(TIMEOUT_EUNIT, 360).
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- ok = couch_db:close(Db),
- DbName.
-
-setup(remote) ->
- {remote, setup()};
-setup({A, B}) ->
- Ctx = test_util:start_couch([couch_replicator]),
- config:set("chttpd", "max_http_request_size", "10000", false),
- Source = setup(A),
- Target = setup(B),
- {Ctx, {Source, Target}}.
-
-teardown({remote, DbName}) ->
- teardown(DbName);
-teardown(DbName) ->
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok.
-
-teardown(_, {Ctx, {Source, Target}}) ->
- teardown(Source),
- teardown(Target),
- ok = application:stop(couch_replicator),
- ok = test_util:stop_couch(Ctx).
-
 reduce_max_request_size_test_() ->
-    Pairs = [{remote, remote}],
     {
         "Replicate docs when target has a small max_http_request_size",
         {
-            foreachx,
-            fun setup/1,
-            fun teardown/2,
+            foreach,
+            fun couch_replicator_test_helper:test_setup/0,
+            fun couch_replicator_test_helper:test_teardown/1,
             [
-                {Pair, fun should_replicate_all_docs/2}
-             || Pair <- Pairs
-            ] ++
-                [
-                    {Pair, fun should_replicate_one/2}
-                 || Pair <- Pairs
-                ] ++
-                % Disabled. See issue 574. Sometimes PUTs with a doc and
-                % attachment which exceed maximum request size are simply
-                % closed instead of returning a 413 request. That makes these
-                % tests flaky.
-                [
-                    {Pair, fun should_replicate_one_with_attachment/2}
-                 || Pair <- Pairs
-                ]
+                ?TDEF_FE(should_replicate_all_docs, ?TIMEOUT_EUNIT),
+                ?TDEF_FE(should_replicate_one, ?TIMEOUT_EUNIT),
+                ?TDEF_FE(should_replicate_one_with_attachment, ?TIMEOUT_EUNIT)
+            ]
         }
     }.
 
 % Test documents which are below max_http_request_size but when batched, batch size
 % will be greater than max_http_request_size. Replicator could automatically split
 % the batch into smaller batches and POST those separately.
-should_replicate_all_docs({From, To}, {_Ctx, {Source, Target}}) ->
-    {
-        lists:flatten(io_lib:format("~p -> ~p", [From, To])),
-        {inorder, [
-            should_populate_source(Source),
-            should_replicate(Source, Target),
-            should_compare_databases(Source, Target, [])
-        ]}
-    }.
+should_replicate_all_docs({_Ctx, {Source, Target}}) ->
+    config:set("chttpd", "max_http_request_size", "10000", false),
+    populate_source(Source),
+    replicate(Source, Target),
+    compare(Source, Target, []).
 
 % If a document is too large to post as a single request, that document is
 % skipped but replication overall will make progress and not crash.
-should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
-    {
-        lists:flatten(io_lib:format("~p -> ~p", [From, To])),
-        {inorder, [
-            should_populate_source_one_large_one_small(Source),
-            should_replicate(Source, Target),
-            should_compare_databases(Source, Target, [<<"doc0">>])
-        ]}
-    }.
+should_replicate_one({_Ctx, {Source, Target}}) ->
+    config:set("chttpd", "max_http_request_size", "10000", false),
+    populate_source_one_large_one_small(Source),
+    replicate(Source, Target),
+    compare(Source, Target, [<<"doc0">>]).
 
 % If a document has an attachment > 64 * 1024 bytes, replicator will switch to
 % POST-ing individual documents directly and skip bulk_docs. Test that case
 % separately
 % See note in main test function why this was disabled.
-should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
-    {
-        lists:flatten(io_lib:format("~p -> ~p", [From, To])),
-        {inorder, [
-            should_populate_source_one_large_attachment(Source),
-            should_populate_source(Source),
-            should_replicate(Source, Target),
-            should_compare_databases(Source, Target, [<<"doc0">>])
-        ]}
-    }.
+should_replicate_one_with_attachment({_Ctx, {Source, Target}}) ->
+    config:set("chttpd", "max_http_request_size", "10000", false),
+    populate_source_one_large_attachment(Source),
+    populate_source(Source),
+    replicate(Source, Target),
+    compare(Source, Target, [<<"doc0">>]).
+
+populate_source(Source) ->
+    add_docs(Source, 5, 3000, 0).
+
+populate_source_one_large_one_small(Source) ->
+    one_large_one_small(Source, 12000, 3000).
 
-should_populate_source({remote, Source}) ->
-    should_populate_source(Source);
-should_populate_source(Source) ->
-    {timeout, ?TIMEOUT_EUNIT, ?_test(add_docs(Source, 5, 3000, 0))}.
-
-should_populate_source_one_large_one_small({remote, Source}) ->
-    should_populate_source_one_large_one_small(Source);
-should_populate_source_one_large_one_small(Source) ->
-    {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.
-
-should_populate_source_one_large_attachment({remote, Source}) ->
-    should_populate_source_one_large_attachment(Source);
-should_populate_source_one_large_attachment(Source) ->
-    {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
-
-should_replicate({remote, Source}, Target) ->
-    should_replicate(db_url(Source), Target);
-should_replicate(Source, {remote, Target}) ->
-    should_replicate(Source, db_url(Target));
-should_replicate(Source, Target) ->
-    {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
-
-should_compare_databases({remote, Source}, Target, ExceptIds) ->
-    should_compare_databases(Source, Target, ExceptIds);
-should_compare_databases(Source, {remote, Target}, ExceptIds) ->
-    should_compare_databases(Source, Target, ExceptIds);
-should_compare_databases(Source, Target, ExceptIds) ->
-    {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target, ExceptIds))}.
+populate_source_one_large_attachment(Source) ->
+    one_large_attachment(Source, 70000, 70000).
 
 binary_chunk(Size) when is_integer(Size), Size > 0 ->
     <<<<"x">> || _ <- lists:seq(1, Size)>>.
@@ -155,11 +79,9 @@ one_large_attachment(DbName, Size, AttSize) ->
     add_doc(DbName, <<"doc0">>, Size, AttSize).
 
 add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
-    {ok, Db} = couch_db:open_int(DbName, []),
     Doc0 = #doc{id = DocId, body = {[{<<"x">>, binary_chunk(Size)}]}},
     Doc = Doc0#doc{atts = atts(AttSize)},
-    {ok, _} = couch_db:update_doc(Db, Doc, []),
-    couch_db:close(Db).
+    {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]).
 
 atts(0) ->
     [];
@@ -173,12 +95,18 @@ atts(Size) ->
         ])
     ].
 
+db_url(DbName) ->
+    couch_replicator_test_helper:cluster_db_url(DbName).
+
 replicate(Source, Target) ->
-    replicate(
+    couch_replicator_test_helper:replicate(
         {[
-            {<<"source">>, Source},
-            {<<"target">>, Target},
-            % This make batch_size predictable
+            {<<"source">>, db_url(Source)},
+            {<<"target">>, db_url(Target)},
+            % This makes batch_size more predictable
            {<<"worker_processes">>, "1"}
         ]}
     ).
+
+compare(Source, Target, ExceptIds) ->
+    couch_replicator_test_helper:cluster_compare_dbs(Source, Target, ExceptIds).