-rw-r--r--  src/couch/test/couch_passwords_tests.erl            2
-rw-r--r--  src/mem3/test/mem3_rep_test.erl                    17
-rw-r--r--  src/mem3/test/mem3_reshard_api_test.erl           121
-rw-r--r--  src/mem3/test/mem3_reshard_changes_feed_test.erl    9
-rw-r--r--  src/mem3/test/mem3_reshard_test.erl                37
-rw-r--r--  src/mem3/test/mem3_sync_security_test.erl          48
6 files changed, 130 insertions, 104 deletions
diff --git a/src/couch/test/couch_passwords_tests.erl b/src/couch/test/couch_passwords_tests.erl
index dea6d6b7b..88de8530f 100644
--- a/src/couch/test/couch_passwords_tests.erl
+++ b/src/couch/test/couch_passwords_tests.erl
@@ -46,7 +46,7 @@ pbkdf2_test_()->
<<"sa\0lt">>,
4096, 16))},
- {timeout, 180, %% this may runs too long on slow hosts
+ {timeout, 600, %% this may run too long on slow hosts
{"Iterations: 16777216 - this may take some time",
?_assertEqual(
{ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
diff --git a/src/mem3/test/mem3_rep_test.erl b/src/mem3/test/mem3_rep_test.erl
index 7e8856f7c..4a46e7b93 100644
--- a/src/mem3/test/mem3_rep_test.erl
+++ b/src/mem3/test/mem3_rep_test.erl
@@ -20,6 +20,7 @@
-define(ID, <<"_id">>).
+-define(TIMEOUT, 60). % seconds
setup() ->
{AllSrc, AllTgt} = {?tempdb(), ?tempdb()},
@@ -65,7 +66,7 @@ mem3_reshard_db_test_() ->
replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{docs => 10, delete => [5, 9]},
add_test_docs(AllSrc, DocSpec),
SDocs = get_all_docs(AllSrc),
@@ -79,11 +80,11 @@ replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) ->
?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
?assertEqual(SDocs, get_all_docs(AllTgt))
- end).
+ end)}.
replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{docs => 10, delete => [5, 9]},
add_test_docs(AllSrc, DocSpec),
SDocs = get_all_docs(AllSrc),
@@ -97,11 +98,11 @@ replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) ->
?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
?assertEqual(SDocs, get_all_docs(AllTgt))
- end).
+ end)}.
replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{docs => 10, delete => [5, 9]},
add_test_docs(AllSrc, DocSpec),
SDocs = get_all_docs(AllSrc),
@@ -122,11 +123,11 @@ replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) ->
?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)),
?assertEqual(SDocs, get_all_docs(AllTgt))
- end).
+ end)}.
replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{
pdocs => #{
<<"PX">> => 15,
@@ -149,7 +150,7 @@ replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) ->
?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)),
?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)),
?assertEqual(SDocs, get_all_docs(PartTgt))
- end).
+ end)}.
get_partition_info(DbName, Partition) ->
diff --git a/src/mem3/test/mem3_reshard_api_test.erl b/src/mem3/test/mem3_reshard_api_test.erl
index 982fed173..c4df24ad3 100644
--- a/src/mem3/test/mem3_reshard_api_test.erl
+++ b/src/mem3/test/mem3_reshard_api_test.erl
@@ -27,6 +27,7 @@
-define(STATE, "_reshard/state").
-define(ID, <<"id">>).
-define(OK, <<"ok">>).
+-define(TIMEOUT, 60). % seconds
setup() ->
@@ -110,7 +111,7 @@ mem3_reshard_api_test_() ->
basics({Top, _}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
% GET /_reshard
?assertMatch({200, #{
<<"state">> := <<"running">>,
@@ -137,11 +138,11 @@ basics({Top, _}) ->
?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")),
?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})),
?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope}))
- end).
+ end)}.
create_job_basic({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
% POST /_reshard/jobs
{C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
?assertEqual(201, C1),
@@ -192,11 +193,11 @@ create_job_basic({Top, {Db1, _, _}}) ->
% DELETE /_reshard/jobs/$jobid should be a 404 as well
?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id)))
- end).
+ end)}.
create_two_jobs({Top, {Db1, Db2, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
?assertMatch({201, [#{?OK := true}]},
@@ -218,20 +219,20 @@ create_two_jobs({Top, {Db1, Db2, _}}) ->
?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)),
{200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)),
?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD))
- end).
+ end)}.
create_multiple_jobs_from_one_post({Top, {_, _, Db3}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
{C1, R1} = req(post, Jobs, #{type => split, db => Db3}),
?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}),
?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD))
- end).
+ end)}.
start_stop_cluster_basic({Top, _}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Url = Top ++ ?STATE,
?assertMatch({200, #{
@@ -261,11 +262,11 @@ start_stop_cluster_basic({Top, _}) ->
}}, req(get, Top ++ ?RESHARD)),
?assertMatch({200, _}, req(put, Url, #{state => running})),
?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url))
- end).
+ end)}.
test_disabled({Top, _}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
application:set_env(mem3, reshard_disabled, true),
?assertMatch({501, _}, req(get, Top ++ ?RESHARD)),
?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})),
@@ -273,11 +274,11 @@ test_disabled({Top, _}) ->
application:unset_env(mem3, reshard_disabled),
?assertMatch({200, _}, req(get, Top ++ ?RESHARD)),
?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running}))
- end).
+ end)}.
start_stop_cluster_with_a_job({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Url = Top ++ ?STATE,
?assertMatch({200, _}, req(put, Url, #{state => stopped})),
@@ -316,11 +317,11 @@ start_stop_cluster_with_a_job({Top, {Db1, _, _}}) ->
?assertMatch({200, _}, req(put, Url, #{state => running})),
?assertMatch({200, #{?ID := Id2, <<"job_state">> := JSt}}
when JSt =/= <<"stopped">>, req(get, Top ++ ?JOBS ++ ?b2l(Id2)))
- end).
+ end)}.
individual_job_start_stop({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
intercept_state(topoff1),
Body = #{type => split, db => Db1},
@@ -352,11 +353,11 @@ individual_job_start_stop({Top, {Db1, _, _}}) ->
% Let it continue running and it should complete eventually
JobPid2 ! continue,
wait_state(StUrl, <<"completed">>)
- end).
+ end)}.
individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
intercept_state(topoff1),
Body = #{type => split, db => Db1},
@@ -397,11 +398,11 @@ individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) ->
% Let it continue running and it should complete eventually
JobPid2 ! continue,
wait_state(StUrl, <<"completed">>)
- end).
+ end)}.
create_job_with_invalid_arguments({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
% Nothing in the body
@@ -434,11 +435,11 @@ create_job_with_invalid_arguments({Top, {Db1, _, _}}) ->
% Can't have both db and shard
?assertMatch({400, _}, req(post, Jobs, #{type => split, db => Db1,
shard => <<"blah">>}))
- end).
+ end)}.
create_job_with_db({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
Body1 = #{type => split, db => Db1},
@@ -465,11 +466,11 @@ create_job_with_db({Top, {Db1, _, _}}) ->
[16#80000000, 16#bfffffff],
[16#c0000000, 16#ffffffff]
], [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))])
- end).
+ end)}.
create_job_with_shard_name({Top, {_, _, Db3}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
[S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))],
@@ -490,11 +491,11 @@ create_job_with_shard_name({Top, {_, _, Db3}}) ->
[16#80000000, 16#bfffffff],
[16#c0000000, 16#ffffffff]
], [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))])
- end).
+ end)}.
completed_job_handling({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
% Run job to completion
@@ -542,109 +543,109 @@ completed_job_handling({Top, {Db1, _, _}}) ->
?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
?assertMatch({200, #{?OK := true}}, req(delete, JobUrl))
- end).
+ end)}.
handle_db_deletion_in_topoff1({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = delete_source_in_state(Top, Db1, topoff1),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end).
+ end)}.
handle_db_deletion_in_initial_copy({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = delete_source_in_state(Top, Db1, initial_copy),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end).
+ end)}.
handle_db_deletion_in_copy_local_docs({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = delete_source_in_state(Top, Db1, copy_local_docs),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end).
+ end)}.
handle_db_deletion_in_build_indices({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = delete_source_in_state(Top, Db1, build_indices),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end).
+ end)}.
handle_db_deletion_in_update_shard_map({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = delete_source_in_state(Top, Db1, update_shardmap),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end).
+ end)}.
handle_db_deletion_in_wait_source_close({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = delete_source_in_state(Top, Db1, wait_source_close),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end).
+ end)}.
recover_in_topoff1({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, topoff1),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
recover_in_initial_copy({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, initial_copy),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
recover_in_copy_local_docs({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, copy_local_docs),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
recover_in_build_indices({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, build_indices),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
recover_in_update_shard_map({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, update_shardmap),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
recover_in_wait_source_close({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, wait_source_close),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
recover_in_topoff3({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, topoff3),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
recover_in_source_delete({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
JobId = recover_in_state(Top, Db1, source_delete),
wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end).
+ end)}.
check_max_jobs({Top, {Db1, Db2, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
config:set("reshard", "max_jobs", "0", _Persist=false),
@@ -678,11 +679,11 @@ check_max_jobs({Top, {Db1, Db2, _}}) ->
% Jobs that have been created already are not removed if max jobs is lowered
% so make sure the job completes
wait_to_complete(Top, R4)
- end).
+ end)}.
check_node_and_range_required_params({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Jobs = Top ++ ?JOBS,
Node = atom_to_binary(node(), utf8),
@@ -704,18 +705,18 @@ check_node_and_range_required_params({Top, {Db1, _, _}}) ->
{C3, R3} = req(post, Jobs, Body),
?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
wait_to_complete_then_cleanup(Top, R3)
- end).
+ end)}.
cleanup_completed_jobs({Top, {Db1, _, _}}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
Body = #{type => split, db => Db1},
{201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
wait_state(JobUrl ++ "/state", <<"completed">>),
delete_db(Top, Db1),
wait_for_http_code(JobUrl, 404)
- end).
+ end)}.
% Test helper functions
diff --git a/src/mem3/test/mem3_reshard_changes_feed_test.erl b/src/mem3/test/mem3_reshard_changes_feed_test.erl
index 52e18fb26..4b9e2a34a 100644
--- a/src/mem3/test/mem3_reshard_changes_feed_test.erl
+++ b/src/mem3/test/mem3_reshard_changes_feed_test.erl
@@ -17,6 +17,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/src/mem3_reshard.hrl").
+-define(TIMEOUT, 60). % seconds
-define(assertChanges(Expected, Received),
begin
@@ -66,7 +67,7 @@ mem3_reshard_changes_feed_test_() ->
normal_feed_should_work_after_split(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{
docs => [1, 10],
delete => [5, 6]
@@ -141,11 +142,11 @@ normal_feed_should_work_after_split(#{db1 := Db}) ->
?assertEqual(4, length(Changes7)),
[#{seq := Seq7} | _] = Changes7,
?assertEqual(EndSeq7, Seq7)
- end).
+ end)}.
continuous_feed_should_work_during_split(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
{UpdaterPid, UpdaterRef} = spawn_monitor(fun() ->
Updater = fun U({State, I}) ->
receive
@@ -233,7 +234,7 @@ continuous_feed_should_work_during_split(#{db1 := Db}) ->
DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges],
ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)],
?assertEqual(ExpectedDocIDs, lists:usort(DocIDs))
- end).
+ end)}.
split_and_wait(Db) ->
diff --git a/src/mem3/test/mem3_reshard_test.erl b/src/mem3/test/mem3_reshard_test.erl
index 8c4479656..ab6202115 100644
--- a/src/mem3/test/mem3_reshard_test.erl
+++ b/src/mem3/test/mem3_reshard_test.erl
@@ -19,6 +19,7 @@
-include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function
-define(ID, <<"_id">>).
+-define(TIMEOUT, 60). % seconds
setup() ->
HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
@@ -81,7 +82,7 @@ mem3_reshard_db_test_() ->
% This is a basic test to check that shard splitting preserves documents, and
% db meta props like revs limits and security.
split_one_shard(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1},
add_test_docs(Db, DocSpec),
@@ -135,13 +136,13 @@ split_one_shard(#{db1 := Db}) ->
% Don't forget about the local docs, but don't include internal checkpoints
% as some of those are munged and transformed during the split
?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end).
+ end)}.
% This test checks that documents added while the shard is being split are not
% lost. The topoff1 state happens before indices are built.
update_docs_before_topoff1(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
add_test_docs(Db, #{docs => 10}),
intercept_state(topoff1),
@@ -177,12 +178,12 @@ update_docs_before_topoff1(#{db1 := Db}) ->
?assertEqual(Docs0, Docs1),
?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end).
+ end)}.
% This tests that indices are built during shard splitting.
indices_are_built(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
@@ -206,7 +207,7 @@ indices_are_built(#{db1 := Db}) ->
% 4 because there are 2 indices and 2 target shards
?assertEqual(4, meck:num_calls(hastings_index, await, 2))
end
- end).
+ end)}.
mock_dreyfus_indices() ->
@@ -238,7 +239,7 @@ mock_hastings_indices() ->
% Split partitioned database
split_partitioned_db(#{db2 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{
pdocs => #{
<<"PX">> => 5,
@@ -304,14 +305,14 @@ split_partitioned_db(#{db2 := Db}) ->
% Don't forget about the local docs, but don't include internal checkpoints
% as some of those are munged and transformed during the split
?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end).
+ end)}.
% Make sure a shard can be split again after it was split once. This checks
% that not too many copies got added to some range, such that on the next
% split they would fail to fit into any of the new target ranges.
split_twice(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100},
add_test_docs(Db, DocSpec),
@@ -390,11 +391,11 @@ split_twice(#{db1 := Db}) ->
?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2),
?assertEqual(Docs1, Docs2),
?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2))
- end).
+ end)}.
couch_events_are_emitted(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
couch_event:register_all(self()),
% Split the one shard
@@ -425,11 +426,11 @@ couch_events_are_emitted(#{db1 := Db}) ->
StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events),
?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted),
couch_event:unregister(self())
- end).
+ end)}.
retries_work(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
meck:expect(couch_db_split, split, fun(_, _, _) ->
error(kapow)
end),
@@ -439,11 +440,11 @@ retries_work(#{db1 := Db}) ->
wait_state(JobId, failed),
?assertEqual(3, meck:num_calls(couch_db_split, split, 3))
- end).
+ end)}.
target_reset_in_initial_copy(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
Job = #job{
source = Src,
@@ -465,17 +466,17 @@ target_reset_in_initial_copy(#{db1 := Db}) ->
exit(JobPid, kill),
exit(BogusParent, kill),
?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
- end).
+ end)}.
split_an_incomplete_shard_map(#{db1 := Db}) ->
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
[#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
[#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
meck:expect(mem3_util, calculate_max_n, 1, 0),
?assertMatch({error, {not_enough_shard_copies, _}},
mem3_reshard:start_split_job(Shard))
- end).
+ end)}.
intercept_state(State) ->
diff --git a/src/mem3/test/mem3_sync_security_test.erl b/src/mem3/test/mem3_sync_security_test.erl
index 4e06dd807..e67a72017 100644
--- a/src/mem3/test/mem3_sync_security_test.erl
+++ b/src/mem3/test/mem3_sync_security_test.erl
@@ -17,16 +17,38 @@
-include("mem3.hrl").
-include_lib("eunit/include/eunit.hrl").
-go_test() ->
- Ctx = test_util:start_couch([fabric, mem3]),
- try
- ok = meck:new(fabric, [passthrough]),
- meck:expect(fabric, all_dbs, fun() ->
- {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
- end),
- Result = mem3_sync_security:go(),
- ?assertEqual(ok, Result)
- after
- meck:unload(),
- test_util:stop_couch(Ctx)
- end.
+-define(TIMEOUT, 5). % seconds
+
+go_test_() ->
+ {
+ "security property sync test",
+ {
+ setup,
+ fun start_couch/0, fun stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun sync_security_ok/1
+ ]
+ }
+ }
+ }.
+
+start_couch() ->
+ test_util:start_couch([fabric, mem3]).
+
+stop_couch(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+setup() ->
+ ok = meck:new(fabric, [passthrough]),
+ meck:expect(fabric, all_dbs, fun() ->
+ {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
+ end).
+
+teardown(_) ->
+ meck:unload().
+
+sync_security_ok(_) ->
+ {timeout, ?TIMEOUT, ?_assertEqual(ok, mem3_sync_security:go())}.
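
For reference, the pattern applied throughout this diff is EUnit's {timeout, Seconds, Test} test-representation tuple, which raises the per-test time limit above EUnit's 5-second default. Below is a minimal, self-contained sketch of that pattern; the module and test names are illustrative only and are not part of this commit.

-module(timeout_example_test).
-include_lib("eunit/include/eunit.hrl").

-define(TIMEOUT, 60). % seconds

%% Without the wrapper, EUnit aborts any single test after its default
%% 5 second limit. {timeout, Seconds, Test} raises that limit for the
%% wrapped test object only.
slow_op_test_() ->
    {timeout, ?TIMEOUT, ?_test(begin
        timer:sleep(6000), % exceeds the 5s default; passes only with the wrapper
        ?assertEqual(ok, ok)
    end)}.

Note that the tuple wraps the test object returned by ?_test/1, exactly as in the hunks above; a timeout placed inside the begin ... end body would not be seen by EUnit's runner.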